-rw-r--r--  src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt | 4
-rw-r--r--  src/common/alignment.h | 37
-rw-r--r--  src/common/lz4_compression.cpp | 6
-rw-r--r--  src/common/lz4_compression.h | 2
-rw-r--r--  src/core/CMakeLists.txt | 45
-rw-r--r--  src/core/file_sys/card_image.cpp | 6
-rw-r--r--  src/core/file_sys/content_archive.cpp | 585
-rw-r--r--  src/core/file_sys/content_archive.h | 66
-rw-r--r--  src/core/file_sys/errors.h | 70
-rw-r--r--  src/core/file_sys/fssystem/fs_i_storage.h | 58
-rw-r--r--  src/core/file_sys/fssystem/fs_types.h | 46
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp | 251
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h | 114
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp | 129
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h | 43
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp | 112
-rw-r--r--  src/core/file_sys/fssystem/fssystem_aes_xts_storage.h | 42
-rw-r--r--  src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h | 146
-rw-r--r--  src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp | 204
-rw-r--r--  src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h | 21
-rw-r--r--  src/core/file_sys/fssystem/fssystem_bucket_tree.cpp | 598
-rw-r--r--  src/core/file_sys/fssystem/fssystem_bucket_tree.h | 416
-rw-r--r--  src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h | 170
-rw-r--r--  src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h | 110
-rw-r--r--  src/core/file_sys/fssystem/fssystem_compressed_storage.h | 963
-rw-r--r--  src/core/file_sys/fssystem/fssystem_compression_common.h | 43
-rw-r--r--  src/core/file_sys/fssystem/fssystem_compression_configuration.cpp | 36
-rw-r--r--  src/core/file_sys/fssystem/fssystem_compression_configuration.h | 12
-rw-r--r--  src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp | 65
-rw-r--r--  src/core/file_sys/fssystem/fssystem_crypto_configuration.h | 12
-rw-r--r--  src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp | 127
-rw-r--r--  src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h | 164
-rw-r--r--  src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp | 80
-rw-r--r--  src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h | 44
-rw-r--r--  src/core/file_sys/fssystem/fssystem_indirect_storage.cpp | 119
-rw-r--r--  src/core/file_sys/fssystem/fssystem_indirect_storage.h | 294
-rw-r--r--  src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp | 30
-rw-r--r--  src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h | 42
-rw-r--r--  src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp | 91
-rw-r--r--  src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h | 65
-rw-r--r--  src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h | 61
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp | 1351
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h | 364
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_header.cpp | 20
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_header.h | 338
-rw-r--r--  src/core/file_sys/fssystem/fssystem_nca_reader.cpp | 531
-rw-r--r--  src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp | 61
-rw-r--r--  src/core/file_sys/fssystem/fssystem_pooled_buffer.h | 95
-rw-r--r--  src/core/file_sys/fssystem/fssystem_sparse_storage.cpp | 39
-rw-r--r--  src/core/file_sys/fssystem/fssystem_sparse_storage.h | 72
-rw-r--r--  src/core/file_sys/fssystem/fssystem_switch_storage.h | 80
-rw-r--r--  src/core/file_sys/fssystem/fssystem_utility.cpp | 27
-rw-r--r--  src/core/file_sys/fssystem/fssystem_utility.h | 12
-rw-r--r--  src/core/file_sys/nca_patch.cpp | 217
-rw-r--r--  src/core/file_sys/nca_patch.h | 145
-rw-r--r--  src/core/file_sys/patch_manager.cpp | 48
-rw-r--r--  src/core/file_sys/patch_manager.h | 4
-rw-r--r--  src/core/file_sys/registered_cache.cpp | 8
-rw-r--r--  src/core/file_sys/romfs_factory.cpp | 9
-rw-r--r--  src/core/file_sys/romfs_factory.h | 11
-rw-r--r--  src/core/file_sys/submission_package.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_hardware_timer.h | 9
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp | 11
-rw-r--r--  src/core/hle/kernel/k_resource_limit.h | 3
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 2
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_address_arbiter.cpp | 3
-rw-r--r--  src/core/hle/kernel/svc/svc_condition_variable.cpp | 3
-rw-r--r--  src/core/hle/kernel/svc/svc_ipc.cpp | 20
-rw-r--r--  src/core/hle/kernel/svc/svc_resource_limit.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_synchronization.cpp | 16
-rw-r--r--  src/core/hle/kernel/svc/svc_thread.cpp | 29
-rw-r--r--  src/core/hle/service/am/applets/applet_web_browser.cpp | 2
-rw-r--r--  src/core/hle/service/filesystem/filesystem.cpp | 5
-rw-r--r--  src/core/hle/service/filesystem/filesystem.h | 3
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.cpp | 3
-rw-r--r--  src/core/loader/loader.cpp | 4
-rw-r--r--  src/core/loader/loader.h | 12
-rw-r--r--  src/core/loader/nax.cpp | 4
-rw-r--r--  src/core/loader/nax.h | 1
-rw-r--r--  src/core/loader/nca.cpp | 28
-rw-r--r--  src/core/loader/nca.h | 1
-rw-r--r--  src/core/loader/nsp.cpp | 4
-rw-r--r--  src/core/loader/nsp.h | 1
-rw-r--r--  src/core/loader/xci.cpp | 4
-rw-r--r--  src/core/loader/xci.h | 1
-rw-r--r--  src/video_core/host_shaders/CMakeLists.txt | 1
-rw-r--r--  src/video_core/host_shaders/astc_decoder.comp | 988
-rw-r--r--  src/video_core/host_shaders/vulkan_depthstencil_clear.frag | 12
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 7
-rw-r--r--  src/video_core/renderer_opengl/util_shaders.cpp | 1
-rw-r--r--  src/video_core/renderer_vulkan/blit_image.cpp | 79
-rw-r--r--  src/video_core/renderer_vulkan/blit_image.h | 19
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 37
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h | 6
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.cpp | 8
-rw-r--r--  src/yuzu/main.cpp | 18
98 files changed, 8704 insertions, 1612 deletions
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt
index 0e7c1ba88..25b9d4018 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt
@@ -297,11 +297,11 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
emulationActivity?.let {
it.requestedOrientation = when (IntSetting.RENDERER_SCREEN_LAYOUT.int) {
Settings.LayoutOption_MobileLandscape ->
- ActivityInfo.SCREEN_ORIENTATION_USER_LANDSCAPE
+ ActivityInfo.SCREEN_ORIENTATION_SENSOR_LANDSCAPE
Settings.LayoutOption_MobilePortrait ->
ActivityInfo.SCREEN_ORIENTATION_USER_PORTRAIT
Settings.LayoutOption_Unspecified -> ActivityInfo.SCREEN_ORIENTATION_UNSPECIFIED
- else -> ActivityInfo.SCREEN_ORIENTATION_USER_LANDSCAPE
+ else -> ActivityInfo.SCREEN_ORIENTATION_SENSOR_LANDSCAPE
}
}
}
diff --git a/src/common/alignment.h b/src/common/alignment.h
index fa715d497..fc5c26898 100644
--- a/src/common/alignment.h
+++ b/src/common/alignment.h
@@ -3,6 +3,7 @@
#pragma once
+#include <bit>
#include <cstddef>
#include <new>
#include <type_traits>
@@ -10,8 +11,10 @@
namespace Common {
template <typename T>
- requires std::is_unsigned_v<T>
-[[nodiscard]] constexpr T AlignUp(T value, size_t size) {
+ requires std::is_integral_v<T>
+[[nodiscard]] constexpr T AlignUp(T value_, size_t size) {
+ using U = typename std::make_unsigned_t<T>;
+ auto value{static_cast<U>(value_)};
auto mod{static_cast<T>(value % size)};
value -= mod;
return static_cast<T>(mod == T{0} ? value : value + size);
@@ -24,8 +27,10 @@ template <typename T>
}
template <typename T>
- requires std::is_unsigned_v<T>
-[[nodiscard]] constexpr T AlignDown(T value, size_t size) {
+ requires std::is_integral_v<T>
+[[nodiscard]] constexpr T AlignDown(T value_, size_t size) {
+ using U = typename std::make_unsigned_t<T>;
+ const auto value{static_cast<U>(value_)};
return static_cast<T>(value - value % size);
}
@@ -55,6 +60,30 @@ template <typename T, typename U>
return (x + (y - 1)) / y;
}
+template <typename T>
+ requires std::is_integral_v<T>
+[[nodiscard]] constexpr T LeastSignificantOneBit(T x) {
+ return x & ~(x - 1);
+}
+
+template <typename T>
+ requires std::is_integral_v<T>
+[[nodiscard]] constexpr T ResetLeastSignificantOneBit(T x) {
+ return x & (x - 1);
+}
+
+template <typename T>
+ requires std::is_integral_v<T>
+[[nodiscard]] constexpr bool IsPowerOfTwo(T x) {
+ return x > 0 && ResetLeastSignificantOneBit(x) == 0;
+}
+
+template <typename T>
+ requires std::is_integral_v<T>
+[[nodiscard]] constexpr T FloorPowerOfTwo(T x) {
+ return T{1} << (sizeof(T) * 8 - std::countl_zero(x) - 1);
+}
+
template <typename T, size_t Align = 16>
class AlignmentAllocator {
public:
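
Note: the alignment helpers above now accept signed integral types and gain a few bit utilities used by the new fssystem code. A minimal usage sketch (standalone C++, assuming only the functions shown in this hunk):

    // Sketch of the new Common alignment/bit helpers; values chosen for illustration.
    #include <cassert>
    #include <cstdint>
    #include "common/alignment.h"

    void AlignmentExamples() {
        // AlignUp/AlignDown now also work with signed types such as std::int64_t.
        assert(Common::AlignUp<std::int64_t>(0x1234, 0x200) == 0x1400);
        assert(Common::AlignDown<std::int64_t>(0x1234, 0x200) == 0x1200);

        // LeastSignificantOneBit isolates the lowest set bit; ResetLeastSignificantOneBit clears it.
        assert(Common::LeastSignificantOneBit(0b10100u) == 0b00100u);
        assert(Common::ResetLeastSignificantOneBit(0b10100u) == 0b10000u);

        // IsPowerOfTwo and FloorPowerOfTwo build on the same idea.
        assert(Common::IsPowerOfTwo(0x4000));
        assert(!Common::IsPowerOfTwo(0x4001));
        assert(Common::FloorPowerOfTwo(0x4001u) == 0x4000u);
    }
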
diff --git a/src/common/lz4_compression.cpp b/src/common/lz4_compression.cpp
index ffb32fecf..d85ab1742 100644
--- a/src/common/lz4_compression.cpp
+++ b/src/common/lz4_compression.cpp
@@ -71,4 +71,10 @@ std::vector<u8> DecompressDataLZ4(std::span<const u8> compressed, std::size_t un
return uncompressed;
}
+int DecompressDataLZ4(void* dst, size_t dst_size, const void* src, size_t src_size) {
+ // This is just a thin wrapper around LZ4.
+ return LZ4_decompress_safe(reinterpret_cast<const char*>(src), reinterpret_cast<char*>(dst),
+ static_cast<int>(src_size), static_cast<int>(dst_size));
+}
+
} // namespace Common::Compression
diff --git a/src/common/lz4_compression.h b/src/common/lz4_compression.h
index 7fd53a960..3ae17c2bb 100644
--- a/src/common/lz4_compression.h
+++ b/src/common/lz4_compression.h
@@ -56,4 +56,6 @@ namespace Common::Compression {
[[nodiscard]] std::vector<u8> DecompressDataLZ4(std::span<const u8> compressed,
std::size_t uncompressed_size);
+[[nodiscard]] int DecompressDataLZ4(void* dst, size_t dst_size, const void* src, size_t src_size);
+
} // namespace Common::Compression
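
Note: the new raw-pointer overload decompresses into a caller-provided buffer and forwards LZ4's return value, which the compressed-storage code added later in this change uses to detect failures. A hedged usage sketch (the caller below is hypothetical, not part of this change):

    // Hypothetical caller: LZ4_decompress_safe (and therefore this wrapper) returns the number
    // of bytes written on success and a negative value on error.
    #include <span>
    #include <vector>
    #include "common/lz4_compression.h"

    bool DecompressBlock(std::span<const u8> compressed, std::vector<u8>& out, size_t expected_size) {
        out.resize(expected_size);
        const int written = Common::Compression::DecompressDataLZ4(
            out.data(), out.size(), compressed.data(), compressed.size());
        return written >= 0 && static_cast<size_t>(written) == expected_size;
    }
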
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 4b7395be8..012648d69 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -37,6 +37,49 @@ add_library(core STATIC
debugger/gdbstub.h
device_memory.cpp
device_memory.h
+ file_sys/fssystem/fs_i_storage.h
+ file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
+ file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
+ file_sys/fssystem/fssystem_aes_ctr_storage.cpp
+ file_sys/fssystem/fssystem_aes_ctr_storage.h
+ file_sys/fssystem/fssystem_aes_xts_storage.cpp
+ file_sys/fssystem/fssystem_aes_xts_storage.h
+ file_sys/fssystem/fssystem_alignment_matching_storage.h
+ file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
+ file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
+ file_sys/fssystem/fssystem_bucket_tree.cpp
+ file_sys/fssystem/fssystem_bucket_tree.h
+ file_sys/fssystem/fssystem_bucket_tree_utils.h
+ file_sys/fssystem/fssystem_compressed_storage.h
+ file_sys/fssystem/fssystem_compression_common.h
+ file_sys/fssystem/fssystem_compression_configuration.cpp
+ file_sys/fssystem/fssystem_compression_configuration.h
+ file_sys/fssystem/fssystem_crypto_configuration.cpp
+ file_sys/fssystem/fssystem_crypto_configuration.h
+ file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp
+ file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h
+ file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp
+ file_sys/fssystem/fssystem_hierarchical_sha256_storage.h
+ file_sys/fssystem/fssystem_indirect_storage.cpp
+ file_sys/fssystem/fssystem_indirect_storage.h
+ file_sys/fssystem/fssystem_integrity_romfs_storage.cpp
+ file_sys/fssystem/fssystem_integrity_romfs_storage.h
+ file_sys/fssystem/fssystem_integrity_verification_storage.cpp
+ file_sys/fssystem/fssystem_integrity_verification_storage.h
+ file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h
+ file_sys/fssystem/fssystem_nca_file_system_driver.cpp
+ file_sys/fssystem/fssystem_nca_file_system_driver.h
+ file_sys/fssystem/fssystem_nca_header.cpp
+ file_sys/fssystem/fssystem_nca_header.h
+ file_sys/fssystem/fssystem_nca_reader.cpp
+ file_sys/fssystem/fssystem_pooled_buffer.cpp
+ file_sys/fssystem/fssystem_pooled_buffer.h
+ file_sys/fssystem/fssystem_sparse_storage.cpp
+ file_sys/fssystem/fssystem_sparse_storage.h
+ file_sys/fssystem/fssystem_switch_storage.h
+ file_sys/fssystem/fssystem_utility.cpp
+ file_sys/fssystem/fssystem_utility.h
+ file_sys/fssystem/fs_types.h
file_sys/bis_factory.cpp
file_sys/bis_factory.h
file_sys/card_image.cpp
@@ -57,8 +100,6 @@ add_library(core STATIC
file_sys/mode.h
file_sys/nca_metadata.cpp
file_sys/nca_metadata.h
- file_sys/nca_patch.cpp
- file_sys/nca_patch.h
file_sys/partition_filesystem.cpp
file_sys/partition_filesystem.h
file_sys/patch_manager.cpp
diff --git a/src/core/file_sys/card_image.cpp b/src/core/file_sys/card_image.cpp
index 3e667e74a..8b9a4fc5a 100644
--- a/src/core/file_sys/card_image.cpp
+++ b/src/core/file_sys/card_image.cpp
@@ -179,9 +179,9 @@ u32 XCI::GetSystemUpdateVersion() {
}
for (const auto& update_file : update->GetFiles()) {
- NCA nca{update_file, nullptr, 0};
+ NCA nca{update_file};
- if (nca.GetStatus() != Loader::ResultStatus::Success) {
+ if (nca.GetStatus() != Loader::ResultStatus::Success || nca.GetSubdirectories().empty()) {
continue;
}
@@ -292,7 +292,7 @@ Loader::ResultStatus XCI::AddNCAFromPartition(XCIPartition part) {
continue;
}
- auto nca = std::make_shared<NCA>(partition_file, nullptr, 0);
+ auto nca = std::make_shared<NCA>(partition_file);
if (nca->IsUpdate()) {
continue;
}
diff --git a/src/core/file_sys/content_archive.cpp b/src/core/file_sys/content_archive.cpp
index 06efab46d..44e6852fe 100644
--- a/src/core/file_sys/content_archive.cpp
+++ b/src/core/file_sys/content_archive.cpp
@@ -12,545 +12,109 @@
#include "core/crypto/ctr_encryption_layer.h"
#include "core/crypto/key_manager.h"
#include "core/file_sys/content_archive.h"
-#include "core/file_sys/nca_patch.h"
#include "core/file_sys/partition_filesystem.h"
#include "core/file_sys/vfs_offset.h"
#include "core/loader/loader.h"
-namespace FileSys {
+#include "core/file_sys/fssystem/fssystem_compression_configuration.h"
+#include "core/file_sys/fssystem/fssystem_crypto_configuration.h"
+#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
-// Media offsets in headers are stored divided by 512. Mult. by this to get real offset.
-constexpr u64 MEDIA_OFFSET_MULTIPLIER = 0x200;
-
-constexpr u64 SECTION_HEADER_SIZE = 0x200;
-constexpr u64 SECTION_HEADER_OFFSET = 0x400;
-
-constexpr u32 IVFC_MAX_LEVEL = 6;
-
-enum class NCASectionFilesystemType : u8 {
- PFS0 = 0x2,
- ROMFS = 0x3,
-};
-
-struct IVFCLevel {
- u64_le offset;
- u64_le size;
- u32_le block_size;
- u32_le reserved;
-};
-static_assert(sizeof(IVFCLevel) == 0x18, "IVFCLevel has incorrect size.");
-
-struct IVFCHeader {
- u32_le magic;
- u32_le magic_number;
- INSERT_PADDING_BYTES_NOINIT(8);
- std::array<IVFCLevel, 6> levels;
- INSERT_PADDING_BYTES_NOINIT(64);
-};
-static_assert(sizeof(IVFCHeader) == 0xE0, "IVFCHeader has incorrect size.");
-
-struct NCASectionHeaderBlock {
- INSERT_PADDING_BYTES_NOINIT(3);
- NCASectionFilesystemType filesystem_type;
- NCASectionCryptoType crypto_type;
- INSERT_PADDING_BYTES_NOINIT(3);
-};
-static_assert(sizeof(NCASectionHeaderBlock) == 0x8, "NCASectionHeaderBlock has incorrect size.");
-
-struct NCABucketInfo {
- u64 table_offset;
- u64 table_size;
- std::array<u8, 0x10> table_header;
-};
-static_assert(sizeof(NCABucketInfo) == 0x20, "NCABucketInfo has incorrect size.");
-
-struct NCASparseInfo {
- NCABucketInfo bucket;
- u64 physical_offset;
- u16 generation;
- INSERT_PADDING_BYTES_NOINIT(0x6);
-};
-static_assert(sizeof(NCASparseInfo) == 0x30, "NCASparseInfo has incorrect size.");
-
-struct NCACompressionInfo {
- NCABucketInfo bucket;
- INSERT_PADDING_BYTES_NOINIT(0x8);
-};
-static_assert(sizeof(NCACompressionInfo) == 0x28, "NCACompressionInfo has incorrect size.");
-
-struct NCASectionRaw {
- NCASectionHeaderBlock header;
- std::array<u8, 0x138> block_data;
- std::array<u8, 0x8> section_ctr;
- NCASparseInfo sparse_info;
- NCACompressionInfo compression_info;
- INSERT_PADDING_BYTES_NOINIT(0x60);
-};
-static_assert(sizeof(NCASectionRaw) == 0x200, "NCASectionRaw has incorrect size.");
-
-struct PFS0Superblock {
- NCASectionHeaderBlock header_block;
- std::array<u8, 0x20> hash;
- u32_le size;
- INSERT_PADDING_BYTES_NOINIT(4);
- u64_le hash_table_offset;
- u64_le hash_table_size;
- u64_le pfs0_header_offset;
- u64_le pfs0_size;
- INSERT_PADDING_BYTES_NOINIT(0x1B0);
-};
-static_assert(sizeof(PFS0Superblock) == 0x200, "PFS0Superblock has incorrect size.");
-
-struct RomFSSuperblock {
- NCASectionHeaderBlock header_block;
- IVFCHeader ivfc;
- INSERT_PADDING_BYTES_NOINIT(0x118);
-};
-static_assert(sizeof(RomFSSuperblock) == 0x200, "RomFSSuperblock has incorrect size.");
-
-struct BKTRHeader {
- u64_le offset;
- u64_le size;
- u32_le magic;
- INSERT_PADDING_BYTES_NOINIT(0x4);
- u32_le number_entries;
- INSERT_PADDING_BYTES_NOINIT(0x4);
-};
-static_assert(sizeof(BKTRHeader) == 0x20, "BKTRHeader has incorrect size.");
-
-struct BKTRSuperblock {
- NCASectionHeaderBlock header_block;
- IVFCHeader ivfc;
- INSERT_PADDING_BYTES_NOINIT(0x18);
- BKTRHeader relocation;
- BKTRHeader subsection;
- INSERT_PADDING_BYTES_NOINIT(0xC0);
-};
-static_assert(sizeof(BKTRSuperblock) == 0x200, "BKTRSuperblock has incorrect size.");
-
-union NCASectionHeader {
- NCASectionRaw raw{};
- PFS0Superblock pfs0;
- RomFSSuperblock romfs;
- BKTRSuperblock bktr;
-};
-static_assert(sizeof(NCASectionHeader) == 0x200, "NCASectionHeader has incorrect size.");
-
-static bool IsValidNCA(const NCAHeader& header) {
- // TODO(DarkLordZach): Add NCA2/NCA0 support.
- return header.magic == Common::MakeMagic('N', 'C', 'A', '3');
-}
+namespace FileSys {
-NCA::NCA(VirtualFile file_, VirtualFile bktr_base_romfs_, u64 bktr_base_ivfc_offset)
- : file(std::move(file_)),
- bktr_base_romfs(std::move(bktr_base_romfs_)), keys{Core::Crypto::KeyManager::Instance()} {
+NCA::NCA(VirtualFile file_, const NCA* base_nca)
+ : file(std::move(file_)), keys{Core::Crypto::KeyManager::Instance()} {
if (file == nullptr) {
status = Loader::ResultStatus::ErrorNullFile;
return;
}
- if (sizeof(NCAHeader) != file->ReadObject(&header)) {
- LOG_ERROR(Loader, "File reader errored out during header read.");
+ reader = std::make_shared<NcaReader>();
+ if (Result rc =
+ reader->Initialize(file, GetCryptoConfiguration(), GetNcaCompressionConfiguration());
+ R_FAILED(rc)) {
+ if (rc != ResultInvalidNcaSignature) {
+ LOG_ERROR(Loader, "File reader errored out during header read: {:#x}",
+ rc.GetInnerValue());
+ }
status = Loader::ResultStatus::ErrorBadNCAHeader;
return;
}
- if (!HandlePotentialHeaderDecryption()) {
- return;
- }
-
- has_rights_id = std::ranges::any_of(header.rights_id, [](char c) { return c != '\0'; });
-
- const std::vector<NCASectionHeader> sections = ReadSectionHeaders();
- is_update = std::ranges::any_of(sections, [](const NCASectionHeader& nca_header) {
- return nca_header.raw.header.crypto_type == NCASectionCryptoType::BKTR;
- });
-
- if (!ReadSections(sections, bktr_base_ivfc_offset)) {
- return;
- }
-
- status = Loader::ResultStatus::Success;
-}
-
-NCA::~NCA() = default;
-
-bool NCA::CheckSupportedNCA(const NCAHeader& nca_header) {
- if (nca_header.magic == Common::MakeMagic('N', 'C', 'A', '2')) {
- status = Loader::ResultStatus::ErrorNCA2;
- return false;
- }
-
- if (nca_header.magic == Common::MakeMagic('N', 'C', 'A', '0')) {
- status = Loader::ResultStatus::ErrorNCA0;
- return false;
- }
-
- return true;
-}
+ RightsId rights_id{};
+ reader->GetRightsId(rights_id.data(), rights_id.size());
+ if (rights_id != RightsId{}) {
+ // External decryption key required; provide it here.
+ const auto key_generation = std::max<s32>(reader->GetKeyGeneration(), 1) - 1;
-bool NCA::HandlePotentialHeaderDecryption() {
- if (IsValidNCA(header)) {
- return true;
- }
-
- if (!CheckSupportedNCA(header)) {
- return false;
- }
+ u128 rights_id_u128;
+ std::memcpy(rights_id_u128.data(), rights_id.data(), sizeof(rights_id));
- NCAHeader dec_header{};
- Core::Crypto::AESCipher<Core::Crypto::Key256> cipher(
- keys.GetKey(Core::Crypto::S256KeyType::Header), Core::Crypto::Mode::XTS);
- cipher.XTSTranscode(&header, sizeof(NCAHeader), &dec_header, 0, 0x200,
- Core::Crypto::Op::Decrypt);
- if (IsValidNCA(dec_header)) {
- header = dec_header;
- encrypted = true;
- } else {
- if (!CheckSupportedNCA(dec_header)) {
- return false;
+ auto titlekey =
+ keys.GetKey(Core::Crypto::S128KeyType::Titlekey, rights_id_u128[1], rights_id_u128[0]);
+ if (titlekey == Core::Crypto::Key128{}) {
+ status = Loader::ResultStatus::ErrorMissingTitlekey;
+ return;
}
- if (keys.HasKey(Core::Crypto::S256KeyType::Header)) {
- status = Loader::ResultStatus::ErrorIncorrectHeaderKey;
- } else {
- status = Loader::ResultStatus::ErrorMissingHeaderKey;
+ if (!keys.HasKey(Core::Crypto::S128KeyType::Titlekek, key_generation)) {
+ status = Loader::ResultStatus::ErrorMissingTitlekek;
+ return;
}
- return false;
- }
- return true;
-}
+ auto titlekek = keys.GetKey(Core::Crypto::S128KeyType::Titlekek, key_generation);
+ Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(titlekek, Core::Crypto::Mode::ECB);
+ cipher.Transcode(titlekey.data(), titlekey.size(), titlekey.data(),
+ Core::Crypto::Op::Decrypt);
-std::vector<NCASectionHeader> NCA::ReadSectionHeaders() const {
- const std::ptrdiff_t number_sections =
- std::ranges::count_if(header.section_tables, [](const NCASectionTableEntry& entry) {
- return entry.media_offset > 0;
- });
-
- std::vector<NCASectionHeader> sections(number_sections);
- const auto length_sections = SECTION_HEADER_SIZE * number_sections;
-
- if (encrypted) {
- auto raw = file->ReadBytes(length_sections, SECTION_HEADER_OFFSET);
- Core::Crypto::AESCipher<Core::Crypto::Key256> cipher(
- keys.GetKey(Core::Crypto::S256KeyType::Header), Core::Crypto::Mode::XTS);
- cipher.XTSTranscode(raw.data(), length_sections, sections.data(), 2, SECTION_HEADER_SIZE,
- Core::Crypto::Op::Decrypt);
- } else {
- file->ReadBytes(sections.data(), length_sections, SECTION_HEADER_OFFSET);
+ reader->SetExternalDecryptionKey(titlekey.data(), titlekey.size());
}
- return sections;
-}
-
-bool NCA::ReadSections(const std::vector<NCASectionHeader>& sections, u64 bktr_base_ivfc_offset) {
- for (std::size_t i = 0; i < sections.size(); ++i) {
- const auto& section = sections[i];
-
- if (section.raw.sparse_info.bucket.table_offset != 0 &&
- section.raw.sparse_info.bucket.table_size != 0) {
- LOG_ERROR(Loader, "Sparse NCAs are not supported.");
- status = Loader::ResultStatus::ErrorSparseNCA;
- return false;
- }
-
- if (section.raw.compression_info.bucket.table_offset != 0 &&
- section.raw.compression_info.bucket.table_size != 0) {
- LOG_ERROR(Loader, "Compressed NCAs are not supported.");
- status = Loader::ResultStatus::ErrorCompressedNCA;
- return false;
- }
-
- if (section.raw.header.filesystem_type == NCASectionFilesystemType::ROMFS) {
- if (!ReadRomFSSection(section, header.section_tables[i], bktr_base_ivfc_offset)) {
- return false;
- }
- } else if (section.raw.header.filesystem_type == NCASectionFilesystemType::PFS0) {
- if (!ReadPFS0Section(section, header.section_tables[i])) {
- return false;
- }
- }
- }
-
- return true;
-}
-
-bool NCA::ReadRomFSSection(const NCASectionHeader& section, const NCASectionTableEntry& entry,
- u64 bktr_base_ivfc_offset) {
- const std::size_t base_offset = entry.media_offset * MEDIA_OFFSET_MULTIPLIER;
- ivfc_offset = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset;
- const std::size_t romfs_offset = base_offset + ivfc_offset;
- const std::size_t romfs_size = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].size;
- auto raw = std::make_shared<OffsetVfsFile>(file, romfs_size, romfs_offset);
- auto dec = Decrypt(section, raw, romfs_offset);
-
- if (dec == nullptr) {
- if (status != Loader::ResultStatus::Success)
- return false;
- if (has_rights_id)
- status = Loader::ResultStatus::ErrorIncorrectTitlekeyOrTitlekek;
- else
- status = Loader::ResultStatus::ErrorIncorrectKeyAreaKey;
- return false;
- }
-
- if (section.raw.header.crypto_type == NCASectionCryptoType::BKTR) {
- if (section.bktr.relocation.magic != Common::MakeMagic('B', 'K', 'T', 'R') ||
- section.bktr.subsection.magic != Common::MakeMagic('B', 'K', 'T', 'R')) {
- status = Loader::ResultStatus::ErrorBadBKTRHeader;
- return false;
- }
-
- if (section.bktr.relocation.offset + section.bktr.relocation.size !=
- section.bktr.subsection.offset) {
- status = Loader::ResultStatus::ErrorBKTRSubsectionNotAfterRelocation;
- return false;
- }
-
- const u64 size = MEDIA_OFFSET_MULTIPLIER * (entry.media_end_offset - entry.media_offset);
- if (section.bktr.subsection.offset + section.bktr.subsection.size != size) {
- status = Loader::ResultStatus::ErrorBKTRSubsectionNotAtEnd;
- return false;
- }
-
- const u64 offset = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset;
- RelocationBlock relocation_block{};
- if (dec->ReadObject(&relocation_block, section.bktr.relocation.offset - offset) !=
- sizeof(RelocationBlock)) {
- status = Loader::ResultStatus::ErrorBadRelocationBlock;
- return false;
- }
- SubsectionBlock subsection_block{};
- if (dec->ReadObject(&subsection_block, section.bktr.subsection.offset - offset) !=
- sizeof(RelocationBlock)) {
- status = Loader::ResultStatus::ErrorBadSubsectionBlock;
- return false;
- }
-
- std::vector<RelocationBucketRaw> relocation_buckets_raw(
- (section.bktr.relocation.size - sizeof(RelocationBlock)) / sizeof(RelocationBucketRaw));
- if (dec->ReadBytes(relocation_buckets_raw.data(),
- section.bktr.relocation.size - sizeof(RelocationBlock),
- section.bktr.relocation.offset + sizeof(RelocationBlock) - offset) !=
- section.bktr.relocation.size - sizeof(RelocationBlock)) {
- status = Loader::ResultStatus::ErrorBadRelocationBuckets;
- return false;
+ const s32 fs_count = reader->GetFsCount();
+ NcaFileSystemDriver fs(base_nca ? base_nca->reader : nullptr, reader);
+ std::vector<VirtualFile> filesystems(fs_count);
+ for (s32 i = 0; i < fs_count; i++) {
+ NcaFsHeaderReader header_reader;
+ const Result rc = fs.OpenStorage(&filesystems[i], &header_reader, i);
+ if (R_FAILED(rc)) {
+ LOG_ERROR(Loader, "File reader errored out during read of section {}: {:#x}", i,
+ rc.GetInnerValue());
+ status = Loader::ResultStatus::ErrorBadNCAHeader;
+ return;
}
- std::vector<SubsectionBucketRaw> subsection_buckets_raw(
- (section.bktr.subsection.size - sizeof(SubsectionBlock)) / sizeof(SubsectionBucketRaw));
- if (dec->ReadBytes(subsection_buckets_raw.data(),
- section.bktr.subsection.size - sizeof(SubsectionBlock),
- section.bktr.subsection.offset + sizeof(SubsectionBlock) - offset) !=
- section.bktr.subsection.size - sizeof(SubsectionBlock)) {
- status = Loader::ResultStatus::ErrorBadSubsectionBuckets;
- return false;
+ if (header_reader.GetFsType() == NcaFsHeader::FsType::RomFs) {
+ files.push_back(filesystems[i]);
+ romfs = files.back();
}
- std::vector<RelocationBucket> relocation_buckets(relocation_buckets_raw.size());
- std::ranges::transform(relocation_buckets_raw, relocation_buckets.begin(),
- &ConvertRelocationBucketRaw);
- std::vector<SubsectionBucket> subsection_buckets(subsection_buckets_raw.size());
- std::ranges::transform(subsection_buckets_raw, subsection_buckets.begin(),
- &ConvertSubsectionBucketRaw);
-
- u32 ctr_low;
- std::memcpy(&ctr_low, section.raw.section_ctr.data(), sizeof(ctr_low));
- subsection_buckets.back().entries.push_back({section.bktr.relocation.offset, {0}, ctr_low});
- subsection_buckets.back().entries.push_back({size, {0}, 0});
-
- std::optional<Core::Crypto::Key128> key;
- if (encrypted) {
- if (has_rights_id) {
- status = Loader::ResultStatus::Success;
- key = GetTitlekey();
- if (!key) {
- status = Loader::ResultStatus::ErrorMissingTitlekey;
- return false;
- }
- } else {
- key = GetKeyAreaKey(NCASectionCryptoType::BKTR);
- if (!key) {
- status = Loader::ResultStatus::ErrorMissingKeyAreaKey;
- return false;
+ if (header_reader.GetFsType() == NcaFsHeader::FsType::PartitionFs) {
+ auto npfs = std::make_shared<PartitionFilesystem>(filesystems[i]);
+ if (npfs->GetStatus() == Loader::ResultStatus::Success) {
+ dirs.push_back(npfs);
+ if (IsDirectoryExeFS(npfs)) {
+ exefs = dirs.back();
+ } else if (IsDirectoryLogoPartition(npfs)) {
+ logo = dirs.back();
+ } else {
+ continue;
}
}
}
- if (bktr_base_romfs == nullptr) {
- status = Loader::ResultStatus::ErrorMissingBKTRBaseRomFS;
- return false;
+ if (header_reader.GetEncryptionType() == NcaFsHeader::EncryptionType::AesCtrEx) {
+ is_update = true;
}
-
- auto bktr = std::make_shared<BKTR>(
- bktr_base_romfs, std::make_shared<OffsetVfsFile>(file, romfs_size, base_offset),
- relocation_block, relocation_buckets, subsection_block, subsection_buckets, encrypted,
- encrypted ? *key : Core::Crypto::Key128{}, base_offset, bktr_base_ivfc_offset,
- section.raw.section_ctr);
-
- // BKTR applies to entire IVFC, so make an offset version to level 6
- files.push_back(std::make_shared<OffsetVfsFile>(
- bktr, romfs_size, section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset));
- } else {
- files.push_back(std::move(dec));
}
- romfs = files.back();
- return true;
-}
-
-bool NCA::ReadPFS0Section(const NCASectionHeader& section, const NCASectionTableEntry& entry) {
- const u64 offset = (static_cast<u64>(entry.media_offset) * MEDIA_OFFSET_MULTIPLIER) +
- section.pfs0.pfs0_header_offset;
- const u64 size = MEDIA_OFFSET_MULTIPLIER * (entry.media_end_offset - entry.media_offset);
-
- auto dec = Decrypt(section, std::make_shared<OffsetVfsFile>(file, size, offset), offset);
- if (dec != nullptr) {
- auto npfs = std::make_shared<PartitionFilesystem>(std::move(dec));
-
- if (npfs->GetStatus() == Loader::ResultStatus::Success) {
- dirs.push_back(std::move(npfs));
- if (IsDirectoryExeFS(dirs.back()))
- exefs = dirs.back();
- else if (IsDirectoryLogoPartition(dirs.back()))
- logo = dirs.back();
- } else {
- if (has_rights_id)
- status = Loader::ResultStatus::ErrorIncorrectTitlekeyOrTitlekek;
- else
- status = Loader::ResultStatus::ErrorIncorrectKeyAreaKey;
- return false;
- }
+ if (is_update && base_nca == nullptr) {
+ status = Loader::ResultStatus::ErrorMissingBKTRBaseRomFS;
} else {
- if (status != Loader::ResultStatus::Success)
- return false;
- if (has_rights_id)
- status = Loader::ResultStatus::ErrorIncorrectTitlekeyOrTitlekek;
- else
- status = Loader::ResultStatus::ErrorIncorrectKeyAreaKey;
- return false;
+ status = Loader::ResultStatus::Success;
}
-
- return true;
-}
-
-u8 NCA::GetCryptoRevision() const {
- u8 master_key_id = header.crypto_type;
- if (header.crypto_type_2 > master_key_id)
- master_key_id = header.crypto_type_2;
- if (master_key_id > 0)
- --master_key_id;
- return master_key_id;
}
-std::optional<Core::Crypto::Key128> NCA::GetKeyAreaKey(NCASectionCryptoType type) const {
- const auto master_key_id = GetCryptoRevision();
-
- if (!keys.HasKey(Core::Crypto::S128KeyType::KeyArea, master_key_id, header.key_index)) {
- return std::nullopt;
- }
-
- std::vector<u8> key_area(header.key_area.begin(), header.key_area.end());
- Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
- keys.GetKey(Core::Crypto::S128KeyType::KeyArea, master_key_id, header.key_index),
- Core::Crypto::Mode::ECB);
- cipher.Transcode(key_area.data(), key_area.size(), key_area.data(), Core::Crypto::Op::Decrypt);
-
- Core::Crypto::Key128 out{};
- if (type == NCASectionCryptoType::XTS) {
- std::copy(key_area.begin(), key_area.begin() + 0x10, out.begin());
- } else if (type == NCASectionCryptoType::CTR || type == NCASectionCryptoType::BKTR) {
- std::copy(key_area.begin() + 0x20, key_area.begin() + 0x30, out.begin());
- } else {
- LOG_CRITICAL(Crypto, "Called GetKeyAreaKey on invalid NCASectionCryptoType type={:02X}",
- type);
- }
-
- u128 out_128{};
- std::memcpy(out_128.data(), out.data(), sizeof(u128));
- LOG_TRACE(Crypto, "called with crypto_rev={:02X}, kak_index={:02X}, key={:016X}{:016X}",
- master_key_id, header.key_index, out_128[1], out_128[0]);
-
- return out;
-}
-
-std::optional<Core::Crypto::Key128> NCA::GetTitlekey() {
- const auto master_key_id = GetCryptoRevision();
-
- u128 rights_id{};
- memcpy(rights_id.data(), header.rights_id.data(), 16);
- if (rights_id == u128{}) {
- status = Loader::ResultStatus::ErrorInvalidRightsID;
- return std::nullopt;
- }
-
- auto titlekey = keys.GetKey(Core::Crypto::S128KeyType::Titlekey, rights_id[1], rights_id[0]);
- if (titlekey == Core::Crypto::Key128{}) {
- status = Loader::ResultStatus::ErrorMissingTitlekey;
- return std::nullopt;
- }
-
- if (!keys.HasKey(Core::Crypto::S128KeyType::Titlekek, master_key_id)) {
- status = Loader::ResultStatus::ErrorMissingTitlekek;
- return std::nullopt;
- }
-
- Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
- keys.GetKey(Core::Crypto::S128KeyType::Titlekek, master_key_id), Core::Crypto::Mode::ECB);
- cipher.Transcode(titlekey.data(), titlekey.size(), titlekey.data(), Core::Crypto::Op::Decrypt);
-
- return titlekey;
-}
-
-VirtualFile NCA::Decrypt(const NCASectionHeader& s_header, VirtualFile in, u64 starting_offset) {
- if (!encrypted)
- return in;
-
- switch (s_header.raw.header.crypto_type) {
- case NCASectionCryptoType::NONE:
- LOG_TRACE(Crypto, "called with mode=NONE");
- return in;
- case NCASectionCryptoType::CTR:
- // During normal BKTR decryption, this entire function is skipped. This is for the metadata,
- // which uses the same CTR as usual.
- case NCASectionCryptoType::BKTR:
- LOG_TRACE(Crypto, "called with mode=CTR, starting_offset={:016X}", starting_offset);
- {
- std::optional<Core::Crypto::Key128> key;
- if (has_rights_id) {
- status = Loader::ResultStatus::Success;
- key = GetTitlekey();
- if (!key) {
- if (status == Loader::ResultStatus::Success)
- status = Loader::ResultStatus::ErrorMissingTitlekey;
- return nullptr;
- }
- } else {
- key = GetKeyAreaKey(NCASectionCryptoType::CTR);
- if (!key) {
- status = Loader::ResultStatus::ErrorMissingKeyAreaKey;
- return nullptr;
- }
- }
-
- auto out = std::make_shared<Core::Crypto::CTREncryptionLayer>(std::move(in), *key,
- starting_offset);
- Core::Crypto::CTREncryptionLayer::IVData iv{};
- for (std::size_t i = 0; i < 8; ++i) {
- iv[i] = s_header.raw.section_ctr[8 - i - 1];
- }
- out->SetIV(iv);
- return std::static_pointer_cast<VfsFile>(out);
- }
- case NCASectionCryptoType::XTS:
- // TODO(DarkLordZach): Find a test case for XTS-encrypted NCAs
- default:
- LOG_ERROR(Crypto, "called with unhandled crypto type={:02X}",
- s_header.raw.header.crypto_type);
- return nullptr;
- }
-}
+NCA::~NCA() = default;
Loader::ResultStatus NCA::GetStatus() const {
return status;
@@ -579,21 +143,24 @@ VirtualDir NCA::GetParentDirectory() const {
}
NCAContentType NCA::GetType() const {
- return header.content_type;
+ return static_cast<NCAContentType>(reader->GetContentType());
}
u64 NCA::GetTitleId() const {
- if (is_update || status == Loader::ResultStatus::ErrorMissingBKTRBaseRomFS)
- return header.title_id | 0x800;
- return header.title_id;
+ if (is_update) {
+ return reader->GetProgramId() | 0x800;
+ }
+ return reader->GetProgramId();
}
-std::array<u8, 16> NCA::GetRightsId() const {
- return header.rights_id;
+RightsId NCA::GetRightsId() const {
+ RightsId result;
+ reader->GetRightsId(result.data(), result.size());
+ return result;
}
u32 NCA::GetSDKVersion() const {
- return header.sdk_version;
+ return reader->GetSdkAddonVersion();
}
bool NCA::IsUpdate() const {
@@ -612,10 +179,6 @@ VirtualFile NCA::GetBaseFile() const {
return file;
}
-u64 NCA::GetBaseIVFCOffset() const {
- return ivfc_offset;
-}
-
VirtualDir NCA::GetLogoPartition() const {
return logo;
}
diff --git a/src/core/file_sys/content_archive.h b/src/core/file_sys/content_archive.h
index 20f524f80..af521d453 100644
--- a/src/core/file_sys/content_archive.h
+++ b/src/core/file_sys/content_archive.h
@@ -21,7 +21,7 @@ enum class ResultStatus : u16;
namespace FileSys {
-union NCASectionHeader;
+class NcaReader;
/// Describes the type of content within an NCA archive.
enum class NCAContentType : u8 {
@@ -45,41 +45,7 @@ enum class NCAContentType : u8 {
PublicData = 5,
};
-enum class NCASectionCryptoType : u8 {
- NONE = 1,
- XTS = 2,
- CTR = 3,
- BKTR = 4,
-};
-
-struct NCASectionTableEntry {
- u32_le media_offset;
- u32_le media_end_offset;
- INSERT_PADDING_BYTES(0x8);
-};
-static_assert(sizeof(NCASectionTableEntry) == 0x10, "NCASectionTableEntry has incorrect size.");
-
-struct NCAHeader {
- std::array<u8, 0x100> rsa_signature_1;
- std::array<u8, 0x100> rsa_signature_2;
- u32_le magic;
- u8 is_system;
- NCAContentType content_type;
- u8 crypto_type;
- u8 key_index;
- u64_le size;
- u64_le title_id;
- INSERT_PADDING_BYTES(0x4);
- u32_le sdk_version;
- u8 crypto_type_2;
- INSERT_PADDING_BYTES(15);
- std::array<u8, 0x10> rights_id;
- std::array<NCASectionTableEntry, 0x4> section_tables;
- std::array<std::array<u8, 0x20>, 0x4> hash_tables;
- std::array<u8, 0x40> key_area;
- INSERT_PADDING_BYTES(0xC0);
-};
-static_assert(sizeof(NCAHeader) == 0x400, "NCAHeader has incorrect size.");
+using RightsId = std::array<u8, 0x10>;
inline bool IsDirectoryExeFS(const VirtualDir& pfs) {
// According to switchbrew, an exefs must only contain these two files:
@@ -97,8 +63,7 @@ inline bool IsDirectoryLogoPartition(const VirtualDir& pfs) {
// After construction, use GetStatus to determine if the file is valid and ready to be used.
class NCA : public ReadOnlyVfsDirectory {
public:
- explicit NCA(VirtualFile file, VirtualFile bktr_base_romfs = nullptr,
- u64 bktr_base_ivfc_offset = 0);
+ explicit NCA(VirtualFile file, const NCA* base_nca = nullptr);
~NCA() override;
Loader::ResultStatus GetStatus() const;
@@ -110,7 +75,7 @@ public:
NCAContentType GetType() const;
u64 GetTitleId() const;
- std::array<u8, 0x10> GetRightsId() const;
+ RightsId GetRightsId() const;
u32 GetSDKVersion() const;
bool IsUpdate() const;
@@ -119,26 +84,9 @@ public:
VirtualFile GetBaseFile() const;
- // Returns the base ivfc offset used in BKTR patching.
- u64 GetBaseIVFCOffset() const;
-
VirtualDir GetLogoPartition() const;
private:
- bool CheckSupportedNCA(const NCAHeader& header);
- bool HandlePotentialHeaderDecryption();
-
- std::vector<NCASectionHeader> ReadSectionHeaders() const;
- bool ReadSections(const std::vector<NCASectionHeader>& sections, u64 bktr_base_ivfc_offset);
- bool ReadRomFSSection(const NCASectionHeader& section, const NCASectionTableEntry& entry,
- u64 bktr_base_ivfc_offset);
- bool ReadPFS0Section(const NCASectionHeader& section, const NCASectionTableEntry& entry);
-
- u8 GetCryptoRevision() const;
- std::optional<Core::Crypto::Key128> GetKeyAreaKey(NCASectionCryptoType type) const;
- std::optional<Core::Crypto::Key128> GetTitlekey();
- VirtualFile Decrypt(const NCASectionHeader& header, VirtualFile in, u64 starting_offset);
-
std::vector<VirtualDir> dirs;
std::vector<VirtualFile> files;
@@ -146,11 +94,6 @@ private:
VirtualDir exefs = nullptr;
VirtualDir logo = nullptr;
VirtualFile file;
- VirtualFile bktr_base_romfs;
- u64 ivfc_offset = 0;
-
- NCAHeader header{};
- bool has_rights_id{};
Loader::ResultStatus status{};
@@ -158,6 +101,7 @@ private:
bool is_update = false;
Core::Crypto::KeyManager& keys;
+ std::shared_ptr<NcaReader> reader;
};
} // namespace FileSys
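
Note: with this signature change, callers patch update content by passing the base NCA itself rather than its extracted RomFS and IVFC offset. A hedged sketch of the new calling convention ('base_file' and 'update_file' are hypothetical VirtualFiles, and the existing GetRomFS accessor is assumed):

    // Hypothetical usage of the new constructor signature.
    auto base_nca = std::make_unique<FileSys::NCA>(base_file);
    auto update_nca = std::make_unique<FileSys::NCA>(update_file, base_nca.get());

    if (update_nca->GetStatus() == Loader::ResultStatus::Success && update_nca->IsUpdate()) {
        // The patched RomFS is produced internally by NcaFileSystemDriver; no separate
        // BKTR/IVFC plumbing is required from the caller anymore.
        const auto patched_romfs = update_nca->GetRomFS();
    }
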
diff --git a/src/core/file_sys/errors.h b/src/core/file_sys/errors.h
index 7cee0c7df..2f5045a67 100644
--- a/src/core/file_sys/errors.h
+++ b/src/core/file_sys/errors.h
@@ -17,4 +17,74 @@ constexpr Result ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001};
constexpr Result ERROR_INVALID_OFFSET{ErrorModule::FS, 6061};
constexpr Result ERROR_INVALID_SIZE{ErrorModule::FS, 6062};
+constexpr Result ResultUnsupportedSdkVersion{ErrorModule::FS, 50};
+constexpr Result ResultPartitionNotFound{ErrorModule::FS, 1001};
+constexpr Result ResultUnsupportedVersion{ErrorModule::FS, 3002};
+constexpr Result ResultOutOfRange{ErrorModule::FS, 3005};
+constexpr Result ResultAllocationMemoryFailedInFileSystemBuddyHeapA{ErrorModule::FS, 3294};
+constexpr Result ResultAllocationMemoryFailedInNcaFileSystemDriverI{ErrorModule::FS, 3341};
+constexpr Result ResultAllocationMemoryFailedInNcaReaderA{ErrorModule::FS, 3363};
+constexpr Result ResultAllocationMemoryFailedInAesCtrCounterExtendedStorageA{ErrorModule::FS, 3399};
+constexpr Result ResultAllocationMemoryFailedInIntegrityRomFsStorageA{ErrorModule::FS, 3412};
+constexpr Result ResultAllocationMemoryFailedMakeUnique{ErrorModule::FS, 3422};
+constexpr Result ResultAllocationMemoryFailedAllocateShared{ErrorModule::FS, 3423};
+constexpr Result ResultInvalidAesCtrCounterExtendedEntryOffset{ErrorModule::FS, 4012};
+constexpr Result ResultIndirectStorageCorrupted{ErrorModule::FS, 4021};
+constexpr Result ResultInvalidIndirectEntryOffset{ErrorModule::FS, 4022};
+constexpr Result ResultInvalidIndirectEntryStorageIndex{ErrorModule::FS, 4023};
+constexpr Result ResultInvalidIndirectStorageSize{ErrorModule::FS, 4024};
+constexpr Result ResultInvalidBucketTreeSignature{ErrorModule::FS, 4032};
+constexpr Result ResultInvalidBucketTreeEntryCount{ErrorModule::FS, 4033};
+constexpr Result ResultInvalidBucketTreeNodeEntryCount{ErrorModule::FS, 4034};
+constexpr Result ResultInvalidBucketTreeNodeOffset{ErrorModule::FS, 4035};
+constexpr Result ResultInvalidBucketTreeEntryOffset{ErrorModule::FS, 4036};
+constexpr Result ResultInvalidBucketTreeEntrySetOffset{ErrorModule::FS, 4037};
+constexpr Result ResultInvalidBucketTreeNodeIndex{ErrorModule::FS, 4038};
+constexpr Result ResultInvalidBucketTreeVirtualOffset{ErrorModule::FS, 4039};
+constexpr Result ResultRomNcaInvalidPatchMetaDataHashType{ErrorModule::FS, 4084};
+constexpr Result ResultRomNcaInvalidIntegrityLayerInfoOffset{ErrorModule::FS, 4085};
+constexpr Result ResultRomNcaInvalidPatchMetaDataHashDataSize{ErrorModule::FS, 4086};
+constexpr Result ResultRomNcaInvalidPatchMetaDataHashDataOffset{ErrorModule::FS, 4087};
+constexpr Result ResultRomNcaInvalidPatchMetaDataHashDataHash{ErrorModule::FS, 4088};
+constexpr Result ResultRomNcaInvalidSparseMetaDataHashType{ErrorModule::FS, 4089};
+constexpr Result ResultRomNcaInvalidSparseMetaDataHashDataSize{ErrorModule::FS, 4090};
+constexpr Result ResultRomNcaInvalidSparseMetaDataHashDataOffset{ErrorModule::FS, 4091};
+constexpr Result ResultRomNcaInvalidSparseMetaDataHashDataHash{ErrorModule::FS, 4092};
+constexpr Result ResultNcaBaseStorageOutOfRangeB{ErrorModule::FS, 4509};
+constexpr Result ResultNcaBaseStorageOutOfRangeC{ErrorModule::FS, 4510};
+constexpr Result ResultNcaBaseStorageOutOfRangeD{ErrorModule::FS, 4511};
+constexpr Result ResultInvalidNcaSignature{ErrorModule::FS, 4517};
+constexpr Result ResultNcaFsHeaderHashVerificationFailed{ErrorModule::FS, 4520};
+constexpr Result ResultInvalidNcaKeyIndex{ErrorModule::FS, 4521};
+constexpr Result ResultInvalidNcaFsHeaderHashType{ErrorModule::FS, 4522};
+constexpr Result ResultInvalidNcaFsHeaderEncryptionType{ErrorModule::FS, 4523};
+constexpr Result ResultInvalidNcaPatchInfoIndirectSize{ErrorModule::FS, 4524};
+constexpr Result ResultInvalidNcaPatchInfoAesCtrExSize{ErrorModule::FS, 4525};
+constexpr Result ResultInvalidNcaPatchInfoAesCtrExOffset{ErrorModule::FS, 4526};
+constexpr Result ResultInvalidNcaHeader{ErrorModule::FS, 4528};
+constexpr Result ResultInvalidNcaFsHeader{ErrorModule::FS, 4529};
+constexpr Result ResultNcaBaseStorageOutOfRangeE{ErrorModule::FS, 4530};
+constexpr Result ResultInvalidHierarchicalSha256BlockSize{ErrorModule::FS, 4532};
+constexpr Result ResultInvalidHierarchicalSha256LayerCount{ErrorModule::FS, 4533};
+constexpr Result ResultHierarchicalSha256BaseStorageTooLarge{ErrorModule::FS, 4534};
+constexpr Result ResultHierarchicalSha256HashVerificationFailed{ErrorModule::FS, 4535};
+constexpr Result ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount{ErrorModule::FS, 4541};
+constexpr Result ResultInvalidNcaIndirectStorageOutOfRange{ErrorModule::FS, 4542};
+constexpr Result ResultInvalidNcaHeader1SignatureKeyGeneration{ErrorModule::FS, 4543};
+constexpr Result ResultInvalidCompressedStorageSize{ErrorModule::FS, 4547};
+constexpr Result ResultInvalidNcaMetaDataHashDataSize{ErrorModule::FS, 4548};
+constexpr Result ResultInvalidNcaMetaDataHashDataHash{ErrorModule::FS, 4549};
+constexpr Result ResultUnexpectedInCompressedStorageA{ErrorModule::FS, 5324};
+constexpr Result ResultUnexpectedInCompressedStorageB{ErrorModule::FS, 5325};
+constexpr Result ResultUnexpectedInCompressedStorageC{ErrorModule::FS, 5326};
+constexpr Result ResultUnexpectedInCompressedStorageD{ErrorModule::FS, 5327};
+constexpr Result ResultInvalidArgument{ErrorModule::FS, 6001};
+constexpr Result ResultInvalidOffset{ErrorModule::FS, 6061};
+constexpr Result ResultInvalidSize{ErrorModule::FS, 6062};
+constexpr Result ResultNullptrArgument{ErrorModule::FS, 6063};
+constexpr Result ResultUnsupportedSetSizeForIndirectStorage{ErrorModule::FS, 6325};
+constexpr Result ResultUnsupportedWriteForCompressedStorage{ErrorModule::FS, 6387};
+constexpr Result ResultUnsupportedOperateRangeForCompressedStorage{ErrorModule::FS, 6388};
+constexpr Result ResultBufferAllocationFailed{ErrorModule::FS, 6705};
+
} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fs_i_storage.h b/src/core/file_sys/fssystem/fs_i_storage.h
new file mode 100644
index 000000000..416dd57b8
--- /dev/null
+++ b/src/core/file_sys/fssystem/fs_i_storage.h
@@ -0,0 +1,58 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/overflow.h"
+#include "core/file_sys/errors.h"
+#include "core/file_sys/vfs.h"
+
+namespace FileSys {
+
+class IStorage : public VfsFile {
+public:
+ virtual std::string GetName() const override {
+ return {};
+ }
+
+ virtual VirtualDir GetContainingDirectory() const override {
+ return {};
+ }
+
+ virtual bool IsWritable() const override {
+ return true;
+ }
+
+ virtual bool IsReadable() const override {
+ return true;
+ }
+
+ virtual bool Resize(size_t size) override {
+ return false;
+ }
+
+ virtual bool Rename(std::string_view name) override {
+ return false;
+ }
+
+ static inline Result CheckAccessRange(s64 offset, s64 size, s64 total_size) {
+ R_UNLESS(offset >= 0, ResultInvalidOffset);
+ R_UNLESS(size >= 0, ResultInvalidSize);
+ R_UNLESS(Common::WrappingAdd(offset, size) >= offset, ResultOutOfRange);
+ R_UNLESS(offset + size <= total_size, ResultOutOfRange);
+ R_SUCCEED();
+ }
+};
+
+class IReadOnlyStorage : public IStorage {
+public:
+ virtual bool IsWritable() const override {
+ return false;
+ }
+
+ virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
+ return 0;
+ }
+};
+
+} // namespace FileSys
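
Note: IStorage adapts VfsFile so the fssystem storages can be stacked, and CheckAccessRange is the bounds guard each implementation is expected to apply. An illustrative (non-upstream) subclass that honors the Read contract:

    // Illustrative only: a read-only storage over an in-memory buffer.
    #include <cstring>
    #include <vector>
    #include "core/file_sys/fssystem/fs_i_storage.h"

    class MemoryStorageExample : public FileSys::IReadOnlyStorage {
    public:
        explicit MemoryStorageExample(std::vector<u8> data_) : data(std::move(data_)) {}

        size_t Read(u8* buffer, size_t size, size_t offset) const override {
            // Reject out-of-range requests the same way CheckAccessRange validates them.
            if (R_FAILED(CheckAccessRange(static_cast<s64>(offset), static_cast<s64>(size),
                                          static_cast<s64>(data.size())))) {
                return 0;
            }
            std::memcpy(buffer, data.data() + offset, size);
            return size;
        }

        size_t GetSize() const override {
            return data.size();
        }

    private:
        std::vector<u8> data;
    };
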
diff --git a/src/core/file_sys/fssystem/fs_types.h b/src/core/file_sys/fssystem/fs_types.h
new file mode 100644
index 000000000..43aeaf447
--- /dev/null
+++ b/src/core/file_sys/fssystem/fs_types.h
@@ -0,0 +1,46 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+
+namespace FileSys {
+
+struct Int64 {
+ u32 low;
+ u32 high;
+
+ constexpr void Set(s64 v) {
+ this->low = static_cast<u32>((v & static_cast<u64>(0x00000000FFFFFFFFULL)) >> 0);
+ this->high = static_cast<u32>((v & static_cast<u64>(0xFFFFFFFF00000000ULL)) >> 32);
+ }
+
+ constexpr s64 Get() const {
+ return (static_cast<s64>(this->high) << 32) | (static_cast<s64>(this->low));
+ }
+
+ constexpr Int64& operator=(s64 v) {
+ this->Set(v);
+ return *this;
+ }
+
+ constexpr operator s64() const {
+ return this->Get();
+ }
+};
+
+struct HashSalt {
+ static constexpr size_t Size = 32;
+
+ std::array<u8, Size> value;
+};
+static_assert(std::is_trivial_v<HashSalt>);
+static_assert(sizeof(HashSalt) == HashSalt::Size);
+
+constexpr inline size_t IntegrityMinLayerCount = 2;
+constexpr inline size_t IntegrityMaxLayerCount = 7;
+constexpr inline size_t IntegrityLayerCountSave = 5;
+constexpr inline size_t IntegrityLayerCountSaveDataMeta = 4;
+
+} // namespace FileSys
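
Note: Int64 keeps 64-bit fields 4-byte aligned in on-disk headers while converting to and from s64 transparently. A short constexpr round-trip sketch (illustrative, not upstream code):

    // Sketch: Int64 splits an s64 into low/high u32 words and reassembles it.
    constexpr bool Int64RoundTrips(s64 v) {
        FileSys::Int64 packed{};
        packed = v;               // operator= calls Set(), splitting into low/high.
        return packed.Get() == v; // Get() (or operator s64) reassembles the value.
    }
    static_assert(Int64RoundTrips(0x0123456789ABCDEFLL));
    static_assert(alignof(FileSys::Int64) == 4);
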
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
new file mode 100644
index 000000000..f25c95472
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
@@ -0,0 +1,251 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h"
+#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
+#include "core/file_sys/fssystem/fssystem_nca_header.h"
+#include "core/file_sys/vfs_offset.h"
+
+namespace FileSys {
+
+namespace {
+
+class SoftwareDecryptor final : public AesCtrCounterExtendedStorage::IDecryptor {
+public:
+ virtual void Decrypt(
+ u8* buf, size_t buf_size, const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
+ const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) override final;
+};
+
+} // namespace
+
+Result AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out) {
+ std::unique_ptr<IDecryptor> decryptor = std::make_unique<SoftwareDecryptor>();
+ R_UNLESS(decryptor != nullptr, ResultAllocationMemoryFailedInAesCtrCounterExtendedStorageA);
+ *out = std::move(decryptor);
+ R_SUCCEED();
+}
+
+Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
+ VirtualFile data_storage,
+ VirtualFile table_storage) {
+ // Read and verify the bucket tree header.
+ BucketTree::Header header;
+ table_storage->ReadObject(std::addressof(header), 0);
+ R_TRY(header.Verify());
+
+ // Determine extents.
+ const auto node_storage_size = QueryNodeStorageSize(header.entry_count);
+ const auto entry_storage_size = QueryEntryStorageSize(header.entry_count);
+ const auto node_storage_offset = QueryHeaderStorageSize();
+ const auto entry_storage_offset = node_storage_offset + node_storage_size;
+
+ // Create a software decryptor.
+ std::unique_ptr<IDecryptor> sw_decryptor;
+ R_TRY(CreateSoftwareDecryptor(std::addressof(sw_decryptor)));
+
+ // Initialize.
+ R_RETURN(this->Initialize(
+ key, key_size, secure_value, 0, data_storage,
+ std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset),
+ std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset),
+ header.entry_count, std::move(sw_decryptor)));
+}
+
+Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
+ s64 counter_offset, VirtualFile data_storage,
+ VirtualFile node_storage, VirtualFile entry_storage,
+ s32 entry_count,
+ std::unique_ptr<IDecryptor>&& decryptor) {
+ // Validate preconditions.
+ ASSERT(key != nullptr);
+ ASSERT(key_size == KeySize);
+ ASSERT(counter_offset >= 0);
+ ASSERT(decryptor != nullptr);
+
+ // Initialize the bucket tree table.
+ if (entry_count > 0) {
+ R_TRY(
+ m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
+ } else {
+ m_table.Initialize(NodeSize, 0);
+ }
+
+ // Set members.
+ m_data_storage = data_storage;
+ std::memcpy(m_key.data(), key, key_size);
+ m_secure_value = secure_value;
+ m_counter_offset = counter_offset;
+ m_decryptor = std::move(decryptor);
+
+ R_SUCCEED();
+}
+
+void AesCtrCounterExtendedStorage::Finalize() {
+ if (this->IsInitialized()) {
+ m_table.Finalize();
+ m_data_storage = VirtualFile();
+ }
+}
+
+Result AesCtrCounterExtendedStorage::GetEntryList(Entry* out_entries, s32* out_entry_count,
+ s32 entry_count, s64 offset, s64 size) {
+ // Validate pre-conditions.
+ ASSERT(offset >= 0);
+ ASSERT(size >= 0);
+ ASSERT(this->IsInitialized());
+
+ // Clear the out count.
+ R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument);
+ *out_entry_count = 0;
+
+ // Succeed if there's no range.
+ R_SUCCEED_IF(size == 0);
+
+ // If we have an output array, we need it to be non-null.
+ R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument);
+
+ // Check that our range is valid.
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
+
+ // Find the offset in our tree.
+ BucketTree::Visitor visitor;
+ R_TRY(m_table.Find(std::addressof(visitor), offset));
+ {
+ const auto entry_offset = visitor.Get<Entry>()->GetOffset();
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
+ ResultInvalidAesCtrCounterExtendedEntryOffset);
+ }
+
+ // Prepare to loop over entries.
+ const auto end_offset = offset + static_cast<s64>(size);
+ s32 count = 0;
+
+ auto cur_entry = *visitor.Get<Entry>();
+ while (cur_entry.GetOffset() < end_offset) {
+ // Try to write the entry to the out list.
+ if (entry_count != 0) {
+ if (count >= entry_count) {
+ break;
+ }
+ std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry));
+ }
+
+ count++;
+
+ // Advance.
+ if (visitor.CanMoveNext()) {
+ R_TRY(visitor.MoveNext());
+ cur_entry = *visitor.Get<Entry>();
+ } else {
+ break;
+ }
+ }
+
+ // Write the output count.
+ *out_entry_count = count;
+ R_SUCCEED();
+}
+
+size_t AesCtrCounterExtendedStorage::Read(u8* buffer, size_t size, size_t offset) const {
+ // Validate preconditions.
+ ASSERT(this->IsInitialized());
+
+ // Allow zero size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate arguments.
+ ASSERT(buffer != nullptr);
+ ASSERT(Common::IsAligned(offset, BlockSize));
+ ASSERT(Common::IsAligned(size, BlockSize));
+
+ BucketTree::Offsets table_offsets;
+ ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(table_offsets))));
+
+ ASSERT(table_offsets.IsInclude(offset, size));
+
+ // Read the data.
+ m_data_storage->Read(buffer, size, offset);
+
+ // Find the offset in our tree.
+ BucketTree::Visitor visitor;
+ ASSERT(R_SUCCEEDED(m_table.Find(std::addressof(visitor), offset)));
+ {
+ const auto entry_offset = visitor.Get<Entry>()->GetOffset();
+ ASSERT(Common::IsAligned(entry_offset, BlockSize));
+ ASSERT(0 <= entry_offset && table_offsets.IsInclude(entry_offset));
+ }
+
+ // Prepare to read in chunks.
+ u8* cur_data = static_cast<u8*>(buffer);
+ auto cur_offset = offset;
+ const auto end_offset = offset + static_cast<s64>(size);
+
+ while (cur_offset < end_offset) {
+ // Get the current entry.
+ const auto cur_entry = *visitor.Get<Entry>();
+
+ // Get and validate the entry's offset.
+ const auto cur_entry_offset = cur_entry.GetOffset();
+ ASSERT(static_cast<size_t>(cur_entry_offset) <= cur_offset);
+
+ // Get and validate the next entry offset.
+ s64 next_entry_offset;
+ if (visitor.CanMoveNext()) {
+ ASSERT(R_SUCCEEDED(visitor.MoveNext()));
+ next_entry_offset = visitor.Get<Entry>()->GetOffset();
+ ASSERT(table_offsets.IsInclude(next_entry_offset));
+ } else {
+ next_entry_offset = table_offsets.end_offset;
+ }
+ ASSERT(Common::IsAligned(next_entry_offset, BlockSize));
+ ASSERT(cur_offset < static_cast<size_t>(next_entry_offset));
+
+ // Get the offset of the entry in the data we read.
+ const auto data_offset = cur_offset - cur_entry_offset;
+ const auto data_size = (next_entry_offset - cur_entry_offset) - data_offset;
+ ASSERT(data_size > 0);
+
+ // Determine how much is left.
+ const auto remaining_size = end_offset - cur_offset;
+ const auto cur_size = static_cast<size_t>(std::min(remaining_size, data_size));
+ ASSERT(cur_size <= size);
+
+ // If necessary, perform decryption.
+ if (cur_entry.encryption_value == Entry::Encryption::Encrypted) {
+ // Make the CTR for the data we're decrypting.
+ const auto counter_offset = m_counter_offset + cur_entry_offset + data_offset;
+ NcaAesCtrUpperIv upper_iv = {
+ .part = {.generation = static_cast<u32>(cur_entry.generation),
+ .secure_value = m_secure_value}};
+
+ std::array<u8, IvSize> iv;
+ AesCtrStorage::MakeIv(iv.data(), IvSize, upper_iv.value, counter_offset);
+
+ // Decrypt.
+ m_decryptor->Decrypt(cur_data, cur_size, m_key, iv);
+ }
+
+ // Advance.
+ cur_data += cur_size;
+ cur_offset += cur_size;
+ }
+
+ return size;
+}
+
+void SoftwareDecryptor::Decrypt(u8* buf, size_t buf_size,
+ const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
+ const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) {
+ Core::Crypto::AESCipher<Core::Crypto::Key128, AesCtrCounterExtendedStorage::KeySize> cipher(
+ key, Core::Crypto::Mode::CTR);
+ cipher.SetIV(iv);
+ cipher.Transcode(buf, buf_size, buf, Core::Crypto::Op::Decrypt);
+}
+
+} // namespace FileSys
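
Note: each bucket-tree entry records whether a range is encrypted and which generation applies; the AES-CTR IV is then rebuilt from the secure value, the generation, and the byte offset. A hedged sketch of the IV layout this relies on (MakeIv itself lives in fssystem_aes_ctr_storage.cpp, so the exact layout here is an assumption based on the usual NCA CTR convention):

    // Assumed layout: upper 8 bytes hold the big-endian "upper IV" (secure value + generation),
    // lower 8 bytes hold the big-endian AES block index (byte offset / 0x10).
    #include <array>
    #include <cstddef>

    std::array<u8, 0x10> MakeNcaCtrIvSketch(u64 upper_iv, s64 byte_offset) {
        std::array<u8, 0x10> iv{};
        const u64 counter = static_cast<u64>(byte_offset) / 0x10;
        for (size_t i = 0; i < 8; ++i) {
            iv[7 - i] = static_cast<u8>(upper_iv >> (8 * i));
            iv[15 - i] = static_cast<u8>(counter >> (8 * i));
        }
        return iv;
    }
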
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
new file mode 100644
index 000000000..d0e9ceed0
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
@@ -0,0 +1,114 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <optional>
+
+#include "common/literals.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+
+namespace FileSys {
+
+using namespace Common::Literals;
+
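+// Read-only storage which decrypts AES-CTR data whose counter generation changes per region,
+// with the regions described by a bucket tree of entries.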
+class AesCtrCounterExtendedStorage : public IReadOnlyStorage {
+ YUZU_NON_COPYABLE(AesCtrCounterExtendedStorage);
+ YUZU_NON_MOVEABLE(AesCtrCounterExtendedStorage);
+
+public:
+ static constexpr size_t BlockSize = 0x10;
+ static constexpr size_t KeySize = 0x10;
+ static constexpr size_t IvSize = 0x10;
+ static constexpr size_t NodeSize = 16_KiB;
+
+ class IDecryptor {
+ public:
+ virtual ~IDecryptor() {}
+ virtual void Decrypt(u8* buf, size_t buf_size, const std::array<u8, KeySize>& key,
+ const std::array<u8, IvSize>& iv) = 0;
+ };
+
+ struct Entry {
+ enum class Encryption : u8 {
+ Encrypted = 0,
+ NotEncrypted = 1,
+ };
+
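+ // The offset is stored as raw bytes so that Entry stays 4-byte aligned and exactly 0x10 bytes
+ // (see the static_asserts below).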
+ std::array<u8, sizeof(s64)> offset;
+ Encryption encryption_value;
+ std::array<u8, 3> reserved;
+ s32 generation;
+
+ void SetOffset(s64 value) {
+ std::memcpy(this->offset.data(), std::addressof(value), sizeof(s64));
+ }
+
+ s64 GetOffset() const {
+ s64 value;
+ std::memcpy(std::addressof(value), this->offset.data(), sizeof(s64));
+ return value;
+ }
+ };
+ static_assert(sizeof(Entry) == 0x10);
+ static_assert(alignof(Entry) == 4);
+ static_assert(std::is_trivial_v<Entry>);
+
+public:
+ static constexpr s64 QueryHeaderStorageSize() {
+ return BucketTree::QueryHeaderStorageSize();
+ }
+
+ static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
+ return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
+ }
+
+ static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
+ return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
+ }
+
+ static Result CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out);
+
+public:
+ AesCtrCounterExtendedStorage()
+ : m_table(), m_data_storage(), m_secure_value(), m_counter_offset(), m_decryptor() {}
+ virtual ~AesCtrCounterExtendedStorage() {
+ this->Finalize();
+ }
+
+ Result Initialize(const void* key, size_t key_size, u32 secure_value, s64 counter_offset,
+ VirtualFile data_storage, VirtualFile node_storage, VirtualFile entry_storage,
+ s32 entry_count, std::unique_ptr<IDecryptor>&& decryptor);
+ void Finalize();
+
+ bool IsInitialized() const {
+ return m_table.IsInitialized();
+ }
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+
+ virtual size_t GetSize() const override {
+ BucketTree::Offsets offsets;
+ ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(offsets))));
+
+ return offsets.end_offset;
+ }
+
+ Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset,
+ s64 size);
+
+private:
+ Result Initialize(const void* key, size_t key_size, u32 secure_value, VirtualFile data_storage,
+ VirtualFile table_storage);
+
+private:
+ mutable BucketTree m_table;
+ VirtualFile m_data_storage;
+ std::array<u8, KeySize> m_key;
+ u32 m_secure_value;
+ s64 m_counter_offset;
+ std::unique_ptr<IDecryptor> m_decryptor;
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp
new file mode 100644
index 000000000..b65aca18d
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "common/swap.h"
+#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+#include "core/file_sys/fssystem/fssystem_utility.h"
+
+namespace FileSys {
+
+void AesCtrStorage::MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset) {
+ ASSERT(dst != nullptr);
+ ASSERT(dst_size == IvSize);
+ ASSERT(offset >= 0);
+
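+ // The IV is the 64-bit big-endian upper value followed by the big-endian block index of the offset.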
+ const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);
+
+ *reinterpret_cast<u64_be*>(out_addr + 0) = upper;
+ *reinterpret_cast<s64_be*>(out_addr + sizeof(u64)) = static_cast<s64>(offset / BlockSize);
+}
+
+AesCtrStorage::AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
+ size_t iv_size)
+ : m_base_storage(std::move(base)) {
+ ASSERT(m_base_storage != nullptr);
+ ASSERT(key != nullptr);
+ ASSERT(iv != nullptr);
+ ASSERT(key_size == KeySize);
+ ASSERT(iv_size == IvSize);
+
+ std::memcpy(m_key.data(), key, KeySize);
+ std::memcpy(m_iv.data(), iv, IvSize);
+
+ m_cipher.emplace(m_key, Core::Crypto::Mode::CTR);
+}
+
+size_t AesCtrStorage::Read(u8* buffer, size_t size, size_t offset) const {
+ // Allow zero-size reads.
+ if (size == 0) {
+ return size;
+ }
+
+ // Ensure buffer is valid.
+ ASSERT(buffer != nullptr);
+
+ // We can only read at block aligned offsets.
+ ASSERT(Common::IsAligned(offset, BlockSize));
+ ASSERT(Common::IsAligned(size, BlockSize));
+
+ // Read the data.
+ m_base_storage->Read(buffer, size, offset);
+
+ // Setup the counter.
+ std::array<u8, IvSize> ctr;
+ std::memcpy(ctr.data(), m_iv.data(), IvSize);
+ AddCounter(ctr.data(), IvSize, offset / BlockSize);
+
+ // Decrypt.
+ m_cipher->SetIV(ctr);
+ m_cipher->Transcode(buffer, size, buffer, Core::Crypto::Op::Decrypt);
+
+ return size;
+}
+
+size_t AesCtrStorage::Write(const u8* buffer, size_t size, size_t offset) {
+ // Allow zero-size writes.
+ if (size == 0) {
+ return size;
+ }
+
+ // Ensure buffer is valid.
+ ASSERT(buffer != nullptr);
+
+ // We can only write at block aligned offsets.
+ ASSERT(Common::IsAligned(offset, BlockSize));
+ ASSERT(Common::IsAligned(size, BlockSize));
+
+ // Get a pooled buffer.
+ PooledBuffer pooled_buffer;
+ const bool use_work_buffer = true;
+ if (use_work_buffer) {
+ pooled_buffer.Allocate(size, BlockSize);
+ }
+
+ // Setup the counter.
+ std::array<u8, IvSize> ctr;
+ std::memcpy(ctr.data(), m_iv.data(), IvSize);
+ AddCounter(ctr.data(), IvSize, offset / BlockSize);
+
+ // Loop until all data is written.
+ size_t remaining = size;
+ s64 cur_offset = 0;
+ while (remaining > 0) {
+ // Determine data we're writing and where.
+ const size_t write_size =
+ use_work_buffer ? std::min(pooled_buffer.GetSize(), remaining) : remaining;
+
+ void* write_buf;
+ if (use_work_buffer) {
+ write_buf = pooled_buffer.GetBuffer();
+ } else {
+ write_buf = const_cast<u8*>(buffer);
+ }
+
+ // Encrypt the data.
+ m_cipher->SetIV(ctr);
+ m_cipher->Transcode(buffer, write_size, reinterpret_cast<u8*>(write_buf),
+ Core::Crypto::Op::Encrypt);
+
+ // Write the encrypted data.
+ m_base_storage->Write(reinterpret_cast<u8*>(write_buf), write_size, offset + cur_offset);
+
+ // Advance.
+ cur_offset += write_size;
+ remaining -= write_size;
+ if (remaining > 0) {
+ AddCounter(ctr.data(), IvSize, write_size / BlockSize);
+ }
+ }
+
+ return size;
+}
+
+size_t AesCtrStorage::GetSize() const {
+ return m_base_storage->GetSize();
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h
new file mode 100644
index 000000000..339e49697
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h
@@ -0,0 +1,43 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <optional>
+
+#include "core/crypto/aes_util.h"
+#include "core/crypto/key_manager.h"
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/vfs.h"
+
+namespace FileSys {
+
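+// Storage which transparently encrypts writes and decrypts reads in AES-CTR mode against a base storage.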
+class AesCtrStorage : public IStorage {
+ YUZU_NON_COPYABLE(AesCtrStorage);
+ YUZU_NON_MOVEABLE(AesCtrStorage);
+
+public:
+ static constexpr size_t BlockSize = 0x10;
+ static constexpr size_t KeySize = 0x10;
+ static constexpr size_t IvSize = 0x10;
+
+public:
+ static void MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset);
+
+public:
+ AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
+ size_t iv_size);
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+ virtual size_t Write(const u8* buffer, size_t size, size_t offset) override;
+ virtual size_t GetSize() const override;
+
+private:
+ VirtualFile m_base_storage;
+ std::array<u8, KeySize> m_key;
+ std::array<u8, IvSize> m_iv;
+ mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key128>> m_cipher;
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp
new file mode 100644
index 000000000..022424229
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp
@@ -0,0 +1,112 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "common/swap.h"
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+#include "core/file_sys/fssystem/fssystem_utility.h"
+
+namespace FileSys {
+
+void AesXtsStorage::MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size) {
+ ASSERT(dst != nullptr);
+ ASSERT(dst_size == IvSize);
+ ASSERT(offset >= 0);
+
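+ // Only the big-endian sector index is written into the second half of the IV; the first half is left untouched.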
+ const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);
+
+ *reinterpret_cast<s64_be*>(out_addr + sizeof(s64)) = offset / block_size;
+}
+
+AesXtsStorage::AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
+ const void* iv, size_t iv_size, size_t block_size)
+ : m_base_storage(std::move(base)), m_block_size(block_size), m_mutex() {
+ ASSERT(m_base_storage != nullptr);
+ ASSERT(key1 != nullptr);
+ ASSERT(key2 != nullptr);
+ ASSERT(iv != nullptr);
+ ASSERT(key_size == KeySize);
+ ASSERT(iv_size == IvSize);
+ ASSERT(Common::IsAligned(m_block_size, AesBlockSize));
+
+ std::memcpy(m_key.data() + 0, key1, KeySize);
+ std::memcpy(m_key.data() + 0x10, key2, KeySize);
+ std::memcpy(m_iv.data(), iv, IvSize);
+
+ m_cipher.emplace(m_key, Core::Crypto::Mode::XTS);
+}
+
+size_t AesXtsStorage::Read(u8* buffer, size_t size, size_t offset) const {
+ // Allow zero-size reads.
+ if (size == 0) {
+ return size;
+ }
+
+ // Ensure buffer is valid.
+ ASSERT(buffer != nullptr);
+
+ // We can only read at block aligned offsets.
+ ASSERT(Common::IsAligned(offset, AesBlockSize));
+ ASSERT(Common::IsAligned(size, AesBlockSize));
+
+ // Read the data.
+ m_base_storage->Read(buffer, size, offset);
+
+ // Setup the counter.
+ std::array<u8, IvSize> ctr;
+ std::memcpy(ctr.data(), m_iv.data(), IvSize);
+ AddCounter(ctr.data(), IvSize, offset / m_block_size);
+
+ // Handle any unaligned data before the start.
+ size_t processed_size = 0;
+ if ((offset % m_block_size) != 0) {
+ // Determine the size of the pre-data read.
+ const size_t skip_size =
+ static_cast<size_t>(offset - Common::AlignDown(offset, m_block_size));
+ const size_t data_size = std::min(size, m_block_size - skip_size);
+
+ // Decrypt into a pooled buffer.
+ {
+ PooledBuffer tmp_buf(m_block_size, m_block_size);
+ ASSERT(tmp_buf.GetSize() >= m_block_size);
+
+ std::memset(tmp_buf.GetBuffer(), 0, skip_size);
+ std::memcpy(tmp_buf.GetBuffer() + skip_size, buffer, data_size);
+
+ m_cipher->SetIV(ctr);
+ m_cipher->Transcode(tmp_buf.GetBuffer(), m_block_size, tmp_buf.GetBuffer(),
+ Core::Crypto::Op::Decrypt);
+
+ std::memcpy(buffer, tmp_buf.GetBuffer() + skip_size, data_size);
+ }
+
+ AddCounter(ctr.data(), IvSize, 1);
+ processed_size += data_size;
+ ASSERT(processed_size == std::min(size, m_block_size - skip_size));
+ }
+
+ // Decrypt aligned chunks.
+ char* cur = reinterpret_cast<char*>(buffer) + processed_size;
+ size_t remaining = size - processed_size;
+ while (remaining > 0) {
+ const size_t cur_size = std::min(m_block_size, remaining);
+
+ m_cipher->SetIV(ctr);
+ m_cipher->Transcode(cur, cur_size, cur, Core::Crypto::Op::Decrypt);
+
+ remaining -= cur_size;
+ cur += cur_size;
+
+ AddCounter(ctr.data(), IvSize, 1);
+ }
+
+ return size;
+}
+
+size_t AesXtsStorage::GetSize() const {
+ return m_base_storage->GetSize();
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h
new file mode 100644
index 000000000..f342efb57
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h
@@ -0,0 +1,42 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <optional>
+
+#include "core/crypto/aes_util.h"
+#include "core/crypto/key_manager.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+
+namespace FileSys {
+
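+// Read-only storage which decrypts AES-XTS encrypted data, advancing the sector IV once per block_size sector.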
+class AesXtsStorage : public IReadOnlyStorage {
+ YUZU_NON_COPYABLE(AesXtsStorage);
+ YUZU_NON_MOVEABLE(AesXtsStorage);
+
+public:
+ static constexpr size_t AesBlockSize = 0x10;
+ static constexpr size_t KeySize = 0x20;
+ static constexpr size_t IvSize = 0x10;
+
+public:
+ static void MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size);
+
+public:
+ AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
+ const void* iv, size_t iv_size, size_t block_size);
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+ virtual size_t GetSize() const override;
+
+private:
+ VirtualFile m_base_storage;
+ std::array<u8, KeySize> m_key;
+ std::array<u8, IvSize> m_iv;
+ const size_t m_block_size;
+ std::mutex m_mutex;
+ mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key256>> m_cipher;
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h
new file mode 100644
index 000000000..f96691d03
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h
@@ -0,0 +1,146 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/alignment.h"
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+
+namespace FileSys {
+
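+// Adapts reads and writes at arbitrary offsets onto a base storage that requires DataAlign-aligned
+// accesses, using a small stack work buffer for the unaligned head and tail.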
+template <size_t DataAlign_, size_t BufferAlign_>
+class AlignmentMatchingStorage : public IStorage {
+ YUZU_NON_COPYABLE(AlignmentMatchingStorage);
+ YUZU_NON_MOVEABLE(AlignmentMatchingStorage);
+
+public:
+ static constexpr size_t DataAlign = DataAlign_;
+ static constexpr size_t BufferAlign = BufferAlign_;
+
+ static constexpr size_t DataAlignMax = 0x200;
+ static_assert(DataAlign <= DataAlignMax);
+ static_assert(Common::IsPowerOfTwo(DataAlign));
+ static_assert(Common::IsPowerOfTwo(BufferAlign));
+
+private:
+ VirtualFile m_base_storage;
+ s64 m_base_storage_size;
+
+public:
+ explicit AlignmentMatchingStorage(VirtualFile bs) : m_base_storage(std::move(bs)) {}
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+ // Allocate a work buffer on the stack.
+ alignas(DataAlignMax) std::array<char, DataAlign> work_buf;
+
+ // Succeed if zero size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate arguments.
+ ASSERT(buffer != nullptr);
+
+ s64 bs_size = this->GetSize();
+ ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
+
+ return AlignmentMatchingStorageImpl::Read(m_base_storage, work_buf.data(), work_buf.size(),
+ DataAlign, BufferAlign, offset, buffer, size);
+ }
+
+ virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
+ // Allocate a work buffer on the stack.
+ alignas(DataAlignMax) std::array<char, DataAlign> work_buf;
+
+ // Succeed if zero size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate arguments.
+ ASSERT(buffer != nullptr);
+
+ s64 bs_size = this->GetSize();
+ ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
+
+ return AlignmentMatchingStorageImpl::Write(m_base_storage, work_buf.data(), work_buf.size(),
+ DataAlign, BufferAlign, offset, buffer, size);
+ }
+
+ virtual size_t GetSize() const override {
+ return m_base_storage->GetSize();
+ }
+};
+
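+// Variant of AlignmentMatchingStorage where the data alignment is a runtime value and the work
+// buffer is taken from the pooled buffer allocator.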
+template <size_t BufferAlign_>
+class AlignmentMatchingStoragePooledBuffer : public IStorage {
+ YUZU_NON_COPYABLE(AlignmentMatchingStoragePooledBuffer);
+ YUZU_NON_MOVEABLE(AlignmentMatchingStoragePooledBuffer);
+
+public:
+ static constexpr size_t BufferAlign = BufferAlign_;
+
+ static_assert(Common::IsPowerOfTwo(BufferAlign));
+
+private:
+ VirtualFile m_base_storage;
+ s64 m_base_storage_size;
+ size_t m_data_align;
+
+public:
+ explicit AlignmentMatchingStoragePooledBuffer(VirtualFile bs, size_t da)
+ : m_base_storage(std::move(bs)), m_data_align(da) {
+ ASSERT(Common::IsPowerOfTwo(da));
+ }
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+ // Succeed if zero size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate arguments.
+ ASSERT(buffer != nullptr);
+
+ s64 bs_size = this->GetSize();
+ ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
+
+ // Allocate a pooled buffer.
+ PooledBuffer pooled_buffer;
+ pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
+
+ return AlignmentMatchingStorageImpl::Read(m_base_storage, pooled_buffer.GetBuffer(),
+ pooled_buffer.GetSize(), m_data_align,
+ BufferAlign, offset, buffer, size);
+ }
+
+ virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
+ // Succeed if zero size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate arguments.
+ ASSERT(buffer != nullptr);
+
+ s64 bs_size = this->GetSize();
+ ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
+
+ // Allocate a pooled buffer.
+ PooledBuffer pooled_buffer;
+ pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
+
+ return AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(),
+ pooled_buffer.GetSize(), m_data_align,
+ BufferAlign, offset, buffer, size);
+ }
+
+ virtual size_t GetSize() const override {
+ return m_base_storage->GetSize();
+ }
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
new file mode 100644
index 000000000..641c888ae
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
@@ -0,0 +1,204 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
+
+namespace FileSys {
+
+namespace {
+
+template <typename T>
+constexpr size_t GetRoundDownDifference(T x, size_t align) {
+ return static_cast<size_t>(x - Common::AlignDown(x, align));
+}
+
+template <typename T>
+constexpr size_t GetRoundUpDifference(T x, size_t align) {
+ return static_cast<size_t>(Common::AlignUp(x, align) - x);
+}
+
+template <typename T>
+size_t GetRoundUpDifference(T* x, size_t align) {
+ return GetRoundUpDifference(reinterpret_cast<uintptr_t>(x), align);
+}
+
+} // namespace
+
+size_t AlignmentMatchingStorageImpl::Read(VirtualFile base_storage, char* work_buf,
+ size_t work_buf_size, size_t data_alignment,
+ size_t buffer_alignment, s64 offset, u8* buffer,
+ size_t size) {
+ // Check preconditions.
+ ASSERT(work_buf_size >= data_alignment);
+
+ // Succeed if zero size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate arguments.
+ ASSERT(buffer != nullptr);
+
+ // Determine extents.
+ u8* aligned_core_buffer;
+ s64 core_offset;
+ size_t core_size;
+ size_t buffer_gap;
+ size_t offset_gap;
+ s64 covered_offset;
+
+ const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
+ if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
+ buffer_alignment)) {
+ aligned_core_buffer = buffer + offset_round_up_difference;
+
+ core_offset = Common::AlignUp(offset, data_alignment);
+ core_size = (size < offset_round_up_difference)
+ ? 0
+ : Common::AlignDown(size - offset_round_up_difference, data_alignment);
+ buffer_gap = 0;
+ offset_gap = 0;
+
+ covered_offset = core_size > 0 ? core_offset : offset;
+ } else {
+ const size_t buffer_round_up_difference = GetRoundUpDifference(buffer, buffer_alignment);
+
+ aligned_core_buffer = buffer + buffer_round_up_difference;
+
+ core_offset = Common::AlignDown(offset, data_alignment);
+ core_size = (size < buffer_round_up_difference)
+ ? 0
+ : Common::AlignDown(size - buffer_round_up_difference, data_alignment);
+ buffer_gap = buffer_round_up_difference;
+ offset_gap = GetRoundDownDifference(offset, data_alignment);
+
+ covered_offset = offset;
+ }
+
+ // Read the core portion.
+ if (core_size > 0) {
+ base_storage->Read(aligned_core_buffer, core_size, core_offset);
+
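+ // If the core was read from an aligned-down offset or into an offset position within the
+ // buffer, shift it so the requested bytes start at the caller's buffer.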
+ if (offset_gap != 0 || buffer_gap != 0) {
+ std::memmove(aligned_core_buffer - buffer_gap, aligned_core_buffer + offset_gap,
+ core_size - offset_gap);
+ core_size -= offset_gap;
+ }
+ }
+
+ // Handle the head portion.
+ if (offset < covered_offset) {
+ const s64 head_offset = Common::AlignDown(offset, data_alignment);
+ const size_t head_size = static_cast<size_t>(covered_offset - offset);
+
+ ASSERT(GetRoundDownDifference(offset, data_alignment) + head_size <= work_buf_size);
+
+ base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
+ std::memcpy(buffer, work_buf + GetRoundDownDifference(offset, data_alignment), head_size);
+ }
+
+ // Handle the tail portion.
+ s64 tail_offset = covered_offset + core_size;
+ size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
+ while (remaining_tail_size > 0) {
+ const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
+ const auto cur_size =
+ std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
+ remaining_tail_size);
+ base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
+
+ ASSERT((tail_offset - offset) + cur_size <= size);
+ ASSERT((tail_offset - aligned_tail_offset) + cur_size <= data_alignment);
+ std::memcpy(reinterpret_cast<char*>(buffer) + (tail_offset - offset),
+ work_buf + (tail_offset - aligned_tail_offset), cur_size);
+
+ remaining_tail_size -= cur_size;
+ tail_offset += cur_size;
+ }
+
+ return size;
+}
+
+size_t AlignmentMatchingStorageImpl::Write(VirtualFile base_storage, char* work_buf,
+ size_t work_buf_size, size_t data_alignment,
+ size_t buffer_alignment, s64 offset, const u8* buffer,
+ size_t size) {
+ // Check preconditions.
+ ASSERT(work_buf_size >= data_alignment);
+
+ // Succeed if zero size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate arguments.
+ ASSERT(buffer != nullptr);
+
+ // Determine extents.
+ const u8* aligned_core_buffer;
+ s64 core_offset;
+ size_t core_size;
+ s64 covered_offset;
+
+ const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
+ if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
+ buffer_alignment)) {
+ aligned_core_buffer = buffer + offset_round_up_difference;
+
+ core_offset = Common::AlignUp(offset, data_alignment);
+ core_size = (size < offset_round_up_difference)
+ ? 0
+ : Common::AlignDown(size - offset_round_up_difference, data_alignment);
+
+ covered_offset = core_size > 0 ? core_offset : offset;
+ } else {
+ aligned_core_buffer = nullptr;
+
+ core_offset = Common::AlignDown(offset, data_alignment);
+ core_size = 0;
+
+ covered_offset = offset;
+ }
+
+ // Write the core portion.
+ if (core_size > 0) {
+ base_storage->Write(aligned_core_buffer, core_size, core_offset);
+ }
+
+ // Handle the head portion.
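+ // Read the full aligned block, patch in the caller's data, and write it back (read-modify-write).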
+ if (offset < covered_offset) {
+ const s64 head_offset = Common::AlignDown(offset, data_alignment);
+ const size_t head_size = static_cast<size_t>(covered_offset - offset);
+
+ ASSERT((offset - head_offset) + head_size <= data_alignment);
+
+ base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
+ std::memcpy(work_buf + (offset - head_offset), buffer, head_size);
+ base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
+ }
+
+ // Handle the tail portion.
+ s64 tail_offset = covered_offset + core_size;
+ size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
+ while (remaining_tail_size > 0) {
+ ASSERT(static_cast<size_t>(tail_offset - offset) < size);
+
+ const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
+ const auto cur_size =
+ std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
+ remaining_tail_size);
+
+ base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
+ std::memcpy(work_buf + GetRoundDownDifference(tail_offset, data_alignment),
+ buffer + (tail_offset - offset), cur_size);
+ base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
+
+ remaining_tail_size -= cur_size;
+ tail_offset += cur_size;
+ }
+
+ return size;
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
new file mode 100644
index 000000000..4a05b0e88
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
@@ -0,0 +1,21 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+
+namespace FileSys {
+
+class AlignmentMatchingStorageImpl {
+public:
+ static size_t Read(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
+ size_t data_alignment, size_t buffer_alignment, s64 offset, u8* buffer,
+ size_t size);
+ static size_t Write(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
+ size_t data_alignment, size_t buffer_alignment, s64 offset,
+ const u8* buffer, size_t size);
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp b/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp
new file mode 100644
index 000000000..af8541009
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp
@@ -0,0 +1,598 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+
+namespace FileSys {
+
+namespace {
+
+using Node = impl::BucketTreeNode<const s64*>;
+static_assert(sizeof(Node) == sizeof(BucketTree::NodeHeader));
+static_assert(std::is_trivial_v<Node>);
+
+constexpr inline s32 NodeHeaderSize = sizeof(BucketTree::NodeHeader);
+
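+// Helper which binary-searches the sorted s64 offsets stored in a node, either from an in-memory
+// buffer or directly from storage.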
+class StorageNode {
+private:
+ class Offset {
+ public:
+ using difference_type = s64;
+
+ private:
+ s64 m_offset;
+ s32 m_stride;
+
+ public:
+ constexpr Offset(s64 offset, s32 stride) : m_offset(offset), m_stride(stride) {}
+
+ constexpr Offset& operator++() {
+ m_offset += m_stride;
+ return *this;
+ }
+ constexpr Offset operator++(int) {
+ Offset ret(*this);
+ m_offset += m_stride;
+ return ret;
+ }
+
+ constexpr Offset& operator--() {
+ m_offset -= m_stride;
+ return *this;
+ }
+ constexpr Offset operator--(int) {
+ Offset ret(*this);
+ m_offset -= m_stride;
+ return ret;
+ }
+
+ constexpr difference_type operator-(const Offset& rhs) const {
+ return (m_offset - rhs.m_offset) / m_stride;
+ }
+
+ constexpr Offset operator+(difference_type ofs) const {
+ return Offset(m_offset + ofs * m_stride, m_stride);
+ }
+ constexpr Offset operator-(difference_type ofs) const {
+ return Offset(m_offset - ofs * m_stride, m_stride);
+ }
+
+ constexpr Offset& operator+=(difference_type ofs) {
+ m_offset += ofs * m_stride;
+ return *this;
+ }
+ constexpr Offset& operator-=(difference_type ofs) {
+ m_offset -= ofs * m_stride;
+ return *this;
+ }
+
+ constexpr bool operator==(const Offset& rhs) const {
+ return m_offset == rhs.m_offset;
+ }
+ constexpr bool operator!=(const Offset& rhs) const {
+ return m_offset != rhs.m_offset;
+ }
+
+ constexpr s64 Get() const {
+ return m_offset;
+ }
+ };
+
+private:
+ const Offset m_start;
+ const s32 m_count;
+ s32 m_index;
+
+public:
+ StorageNode(size_t size, s32 count)
+ : m_start(NodeHeaderSize, static_cast<s32>(size)), m_count(count), m_index(-1) {}
+ StorageNode(s64 ofs, size_t size, s32 count)
+ : m_start(NodeHeaderSize + ofs, static_cast<s32>(size)), m_count(count), m_index(-1) {}
+
+ s32 GetIndex() const {
+ return m_index;
+ }
+
+ void Find(const char* buffer, s64 virtual_address) {
+ s32 end = m_count;
+ auto pos = m_start;
+
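+ // Binary search for the last entry whose offset is <= virtual_address.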
+ while (end > 0) {
+ auto half = end / 2;
+ auto mid = pos + half;
+
+ s64 offset = 0;
+ std::memcpy(std::addressof(offset), buffer + mid.Get(), sizeof(s64));
+
+ if (offset <= virtual_address) {
+ pos = mid + 1;
+ end -= half + 1;
+ } else {
+ end = half;
+ }
+ }
+
+ m_index = static_cast<s32>(pos - m_start) - 1;
+ }
+
+ Result Find(VirtualFile storage, s64 virtual_address) {
+ s32 end = m_count;
+ auto pos = m_start;
+
+ while (end > 0) {
+ auto half = end / 2;
+ auto mid = pos + half;
+
+ s64 offset = 0;
+ storage->ReadObject(std::addressof(offset), mid.Get());
+
+ if (offset <= virtual_address) {
+ pos = mid + 1;
+ end -= half + 1;
+ } else {
+ end = half;
+ }
+ }
+
+ m_index = static_cast<s32>(pos - m_start) - 1;
+ R_SUCCEED();
+ }
+};
+
+} // namespace
+
+void BucketTree::Header::Format(s32 entry_count_) {
+ ASSERT(entry_count_ >= 0);
+
+ this->magic = Magic;
+ this->version = Version;
+ this->entry_count = entry_count_;
+ this->reserved = 0;
+}
+
+Result BucketTree::Header::Verify() const {
+ R_UNLESS(this->magic == Magic, ResultInvalidBucketTreeSignature);
+ R_UNLESS(this->entry_count >= 0, ResultInvalidBucketTreeEntryCount);
+ R_UNLESS(this->version <= Version, ResultUnsupportedVersion);
+ R_SUCCEED();
+}
+
+Result BucketTree::NodeHeader::Verify(s32 node_index, size_t node_size, size_t entry_size) const {
+ R_UNLESS(this->index == node_index, ResultInvalidBucketTreeNodeIndex);
+ R_UNLESS(entry_size != 0 && node_size >= entry_size + NodeHeaderSize, ResultInvalidSize);
+
+ const size_t max_entry_count = (node_size - NodeHeaderSize) / entry_size;
+ R_UNLESS(this->count > 0 && static_cast<size_t>(this->count) <= max_entry_count,
+ ResultInvalidBucketTreeNodeEntryCount);
+ R_UNLESS(this->offset >= 0, ResultInvalidBucketTreeNodeOffset);
+
+ R_SUCCEED();
+}
+
+Result BucketTree::Initialize(VirtualFile node_storage, VirtualFile entry_storage, size_t node_size,
+ size_t entry_size, s32 entry_count) {
+ // Validate preconditions.
+ ASSERT(entry_size >= sizeof(s64));
+ ASSERT(node_size >= entry_size + sizeof(NodeHeader));
+ ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
+ ASSERT(Common::IsPowerOfTwo(node_size));
+ ASSERT(!this->IsInitialized());
+
+ // Ensure valid entry count.
+ R_UNLESS(entry_count > 0, ResultInvalidArgument);
+
+ // Allocate node.
+ R_UNLESS(m_node_l1.Allocate(node_size), ResultBufferAllocationFailed);
+ ON_RESULT_FAILURE {
+ m_node_l1.Free(node_size);
+ };
+
+ // Read node.
+ node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), node_size);
+
+ // Verify node.
+ R_TRY(m_node_l1->Verify(0, node_size, sizeof(s64)));
+
+ // Validate offsets.
+ const auto offset_count = GetOffsetCount(node_size);
+ const auto entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);
+ const auto* const node = m_node_l1.Get<Node>();
+
+ s64 start_offset;
+ if (offset_count < entry_set_count && node->GetCount() < offset_count) {
+ start_offset = *node->GetEnd();
+ } else {
+ start_offset = *node->GetBegin();
+ }
+ const auto end_offset = node->GetEndOffset();
+
+ R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
+ ResultInvalidBucketTreeEntryOffset);
+ R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);
+
+ // Set member variables.
+ m_node_storage = node_storage;
+ m_entry_storage = entry_storage;
+ m_node_size = node_size;
+ m_entry_size = entry_size;
+ m_entry_count = entry_count;
+ m_offset_count = offset_count;
+ m_entry_set_count = entry_set_count;
+
+ m_offset_cache.offsets.start_offset = start_offset;
+ m_offset_cache.offsets.end_offset = end_offset;
+ m_offset_cache.is_initialized = true;
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+void BucketTree::Initialize(size_t node_size, s64 end_offset) {
+ ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
+ ASSERT(Common::IsPowerOfTwo(node_size));
+ ASSERT(end_offset > 0);
+ ASSERT(!this->IsInitialized());
+
+ m_node_size = node_size;
+
+ m_offset_cache.offsets.start_offset = 0;
+ m_offset_cache.offsets.end_offset = end_offset;
+ m_offset_cache.is_initialized = true;
+}
+
+void BucketTree::Finalize() {
+ if (this->IsInitialized()) {
+ m_node_storage = VirtualFile();
+ m_entry_storage = VirtualFile();
+ m_node_l1.Free(m_node_size);
+ m_node_size = 0;
+ m_entry_size = 0;
+ m_entry_count = 0;
+ m_offset_count = 0;
+ m_entry_set_count = 0;
+
+ m_offset_cache.offsets.start_offset = 0;
+ m_offset_cache.offsets.end_offset = 0;
+ m_offset_cache.is_initialized = false;
+ }
+}
+
+Result BucketTree::Find(Visitor* visitor, s64 virtual_address) {
+ ASSERT(visitor != nullptr);
+ ASSERT(this->IsInitialized());
+
+ R_UNLESS(virtual_address >= 0, ResultInvalidOffset);
+ R_UNLESS(!this->IsEmpty(), ResultOutOfRange);
+
+ BucketTree::Offsets offsets;
+ R_TRY(this->GetOffsets(std::addressof(offsets)));
+
+ R_TRY(visitor->Initialize(this, offsets));
+
+ R_RETURN(visitor->Find(virtual_address));
+}
+
+Result BucketTree::InvalidateCache() {
+ // Reset our offsets.
+ m_offset_cache.is_initialized = false;
+
+ R_SUCCEED();
+}
+
+Result BucketTree::EnsureOffsetCache() {
+ // If we already have an offset cache, we're good.
+ R_SUCCEED_IF(m_offset_cache.is_initialized);
+
+ // Acquire exclusive right to edit the offset cache.
+ std::scoped_lock lk(m_offset_cache.mutex);
+
+ // Check again, to be sure.
+ R_SUCCEED_IF(m_offset_cache.is_initialized);
+
+ // Read/verify L1.
+ m_node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), m_node_size);
+ R_TRY(m_node_l1->Verify(0, m_node_size, sizeof(s64)));
+
+ // Get the node.
+ auto* const node = m_node_l1.Get<Node>();
+
+ s64 start_offset;
+ if (m_offset_count < m_entry_set_count && node->GetCount() < m_offset_count) {
+ start_offset = *node->GetEnd();
+ } else {
+ start_offset = *node->GetBegin();
+ }
+ const auto end_offset = node->GetEndOffset();
+
+ R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
+ ResultInvalidBucketTreeEntryOffset);
+ R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);
+
+ m_offset_cache.offsets.start_offset = start_offset;
+ m_offset_cache.offsets.end_offset = end_offset;
+ m_offset_cache.is_initialized = true;
+
+ R_SUCCEED();
+}
+
+Result BucketTree::Visitor::Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets) {
+ ASSERT(tree != nullptr);
+ ASSERT(m_tree == nullptr || m_tree == tree);
+
+ if (m_entry == nullptr) {
+ m_entry = ::operator new(tree->m_entry_size);
+ R_UNLESS(m_entry != nullptr, ResultBufferAllocationFailed);
+
+ m_tree = tree;
+ m_offsets = offsets;
+ }
+
+ R_SUCCEED();
+}
+
+Result BucketTree::Visitor::MoveNext() {
+ R_UNLESS(this->IsValid(), ResultOutOfRange);
+
+ // Invalidate our index, and read the header for the next index.
+ auto entry_index = m_entry_index + 1;
+ if (entry_index == m_entry_set.info.count) {
+ const auto entry_set_index = m_entry_set.info.index + 1;
+ R_UNLESS(entry_set_index < m_entry_set_count, ResultOutOfRange);
+
+ m_entry_index = -1;
+
+ const auto end = m_entry_set.info.end;
+
+ const auto entry_set_size = m_tree->m_node_size;
+ const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
+
+ m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
+ R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
+
+ R_UNLESS(m_entry_set.info.start == end && m_entry_set.info.start < m_entry_set.info.end,
+ ResultInvalidBucketTreeEntrySetOffset);
+
+ entry_index = 0;
+ } else {
+ m_entry_index = -1;
+ }
+
+ // Read the new entry.
+ const auto entry_size = m_tree->m_entry_size;
+ const auto entry_offset = impl::GetBucketTreeEntryOffset(
+ m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
+ m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
+
+ // Note that we changed index.
+ m_entry_index = entry_index;
+ R_SUCCEED();
+}
+
+Result BucketTree::Visitor::MovePrevious() {
+ R_UNLESS(this->IsValid(), ResultOutOfRange);
+
+ // Invalidate our index, and read the header for the previous index.
+ auto entry_index = m_entry_index;
+ if (entry_index == 0) {
+ R_UNLESS(m_entry_set.info.index > 0, ResultOutOfRange);
+
+ m_entry_index = -1;
+
+ const auto start = m_entry_set.info.start;
+
+ const auto entry_set_size = m_tree->m_node_size;
+ const auto entry_set_index = m_entry_set.info.index - 1;
+ const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
+
+ m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
+ R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
+
+ R_UNLESS(m_entry_set.info.end == start && m_entry_set.info.start < m_entry_set.info.end,
+ ResultInvalidBucketTreeEntrySetOffset);
+
+ entry_index = m_entry_set.info.count;
+ } else {
+ m_entry_index = -1;
+ }
+
+ --entry_index;
+
+ // Read the new entry.
+ const auto entry_size = m_tree->m_entry_size;
+ const auto entry_offset = impl::GetBucketTreeEntryOffset(
+ m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
+ m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
+
+ // Note that we changed index.
+ m_entry_index = entry_index;
+ R_SUCCEED();
+}
+
+Result BucketTree::Visitor::Find(s64 virtual_address) {
+ ASSERT(m_tree != nullptr);
+
+ // Get the node.
+ const auto* const node = m_tree->m_node_l1.Get<Node>();
+ R_UNLESS(virtual_address < node->GetEndOffset(), ResultOutOfRange);
+
+ // Get the entry set index.
+ s32 entry_set_index = -1;
+ if (m_tree->IsExistOffsetL2OnL1() && virtual_address < node->GetBeginOffset()) {
+ const auto start = node->GetEnd();
+ const auto end = node->GetBegin() + m_tree->m_offset_count;
+
+ auto pos = std::upper_bound(start, end, virtual_address);
+ R_UNLESS(start < pos, ResultOutOfRange);
+ --pos;
+
+ entry_set_index = static_cast<s32>(pos - start);
+ } else {
+ const auto start = node->GetBegin();
+ const auto end = node->GetEnd();
+
+ auto pos = std::upper_bound(start, end, virtual_address);
+ R_UNLESS(start < pos, ResultOutOfRange);
+ --pos;
+
+ if (m_tree->IsExistL2()) {
+ const auto node_index = static_cast<s32>(pos - start);
+ R_UNLESS(0 <= node_index && node_index < m_tree->m_offset_count,
+ ResultInvalidBucketTreeNodeOffset);
+
+ R_TRY(this->FindEntrySet(std::addressof(entry_set_index), virtual_address, node_index));
+ } else {
+ entry_set_index = static_cast<s32>(pos - start);
+ }
+ }
+
+ // Validate the entry set index.
+ R_UNLESS(0 <= entry_set_index && entry_set_index < m_tree->m_entry_set_count,
+ ResultInvalidBucketTreeNodeOffset);
+
+ // Find the entry.
+ R_TRY(this->FindEntry(virtual_address, entry_set_index));
+
+ // Set count.
+ m_entry_set_count = m_tree->m_entry_set_count;
+ R_SUCCEED();
+}
+
+Result BucketTree::Visitor::FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index) {
+ const auto node_size = m_tree->m_node_size;
+
+ PooledBuffer pool(node_size, 1);
+ if (node_size <= pool.GetSize()) {
+ R_RETURN(
+ this->FindEntrySetWithBuffer(out_index, virtual_address, node_index, pool.GetBuffer()));
+ } else {
+ pool.Deallocate();
+ R_RETURN(this->FindEntrySetWithoutBuffer(out_index, virtual_address, node_index));
+ }
+}
+
+Result BucketTree::Visitor::FindEntrySetWithBuffer(s32* out_index, s64 virtual_address,
+ s32 node_index, char* buffer) {
+ // Calculate node extents.
+ const auto node_size = m_tree->m_node_size;
+ const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
+ VirtualFile storage = m_tree->m_node_storage;
+
+ // Read the node.
+ storage->Read(reinterpret_cast<u8*>(buffer), node_size, node_offset);
+
+ // Validate the header.
+ NodeHeader header;
+ std::memcpy(std::addressof(header), buffer, NodeHeaderSize);
+ R_TRY(header.Verify(node_index, node_size, sizeof(s64)));
+
+ // Create the node, and find.
+ StorageNode node(sizeof(s64), header.count);
+ node.Find(buffer, virtual_address);
+ R_UNLESS(node.GetIndex() >= 0, ResultInvalidBucketTreeVirtualOffset);
+
+ // Return the index.
+ *out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
+ R_SUCCEED();
+}
+
+Result BucketTree::Visitor::FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address,
+ s32 node_index) {
+ // Calculate node extents.
+ const auto node_size = m_tree->m_node_size;
+ const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
+ VirtualFile storage = m_tree->m_node_storage;
+
+ // Read and validate the header.
+ NodeHeader header;
+ storage->ReadObject(std::addressof(header), node_offset);
+ R_TRY(header.Verify(node_index, node_size, sizeof(s64)));
+
+ // Create the node, and find.
+ StorageNode node(node_offset, sizeof(s64), header.count);
+ R_TRY(node.Find(storage, virtual_address));
+ R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
+
+ // Return the index.
+ *out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
+ R_SUCCEED();
+}
+
+Result BucketTree::Visitor::FindEntry(s64 virtual_address, s32 entry_set_index) {
+ const auto entry_set_size = m_tree->m_node_size;
+
+ PooledBuffer pool(entry_set_size, 1);
+ if (entry_set_size <= pool.GetSize()) {
+ R_RETURN(this->FindEntryWithBuffer(virtual_address, entry_set_index, pool.GetBuffer()));
+ } else {
+ pool.Deallocate();
+ R_RETURN(this->FindEntryWithoutBuffer(virtual_address, entry_set_index));
+ }
+}
+
+Result BucketTree::Visitor::FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index,
+ char* buffer) {
+ // Calculate entry set extents.
+ const auto entry_size = m_tree->m_entry_size;
+ const auto entry_set_size = m_tree->m_node_size;
+ const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
+ VirtualFile storage = m_tree->m_entry_storage;
+
+ // Read the entry set.
+ storage->Read(reinterpret_cast<u8*>(buffer), entry_set_size, entry_set_offset);
+
+ // Validate the entry_set.
+ EntrySetHeader entry_set;
+ std::memcpy(std::addressof(entry_set), buffer, sizeof(EntrySetHeader));
+ R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));
+
+ // Create the node, and find.
+ StorageNode node(entry_size, entry_set.info.count);
+ node.Find(buffer, virtual_address);
+ R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
+
+ // Copy the data into entry.
+ const auto entry_index = node.GetIndex();
+ const auto entry_offset = impl::GetBucketTreeEntryOffset(0, entry_size, entry_index);
+ std::memcpy(m_entry, buffer + entry_offset, entry_size);
+
+ // Set our entry set/index.
+ m_entry_set = entry_set;
+ m_entry_index = entry_index;
+
+ R_SUCCEED();
+}
+
+Result BucketTree::Visitor::FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index) {
+ // Calculate entry set extents.
+ const auto entry_size = m_tree->m_entry_size;
+ const auto entry_set_size = m_tree->m_node_size;
+ const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
+ VirtualFile storage = m_tree->m_entry_storage;
+
+ // Read and validate the entry_set.
+ EntrySetHeader entry_set;
+ storage->ReadObject(std::addressof(entry_set), entry_set_offset);
+ R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));
+
+ // Create the node, and find.
+ StorageNode node(entry_set_offset, entry_size, entry_set.info.count);
+ R_TRY(node.Find(storage, virtual_address));
+ R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
+
+ // Copy the data into entry.
+ const auto entry_index = node.GetIndex();
+ const auto entry_offset =
+ impl::GetBucketTreeEntryOffset(entry_set_offset, entry_size, entry_index);
+ storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
+
+ // Set our entry set/index.
+ m_entry_set = entry_set;
+ m_entry_index = entry_index;
+
+ R_SUCCEED();
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree.h b/src/core/file_sys/fssystem/fssystem_bucket_tree.h
new file mode 100644
index 000000000..46850cd48
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree.h
@@ -0,0 +1,416 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <mutex>
+
+#include "common/alignment.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/literals.h"
+
+#include "core/file_sys/vfs.h"
+#include "core/hle/result.h"
+
+namespace FileSys {
+
+using namespace Common::Literals;
+
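+// BucketTree maps virtual offsets to fixed-size entries stored in node-sized entry sets, using an
+// L1 offset node (and an optional L2 level) to locate the entry set which covers a given offset.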
+class BucketTree {
+ YUZU_NON_COPYABLE(BucketTree);
+ YUZU_NON_MOVEABLE(BucketTree);
+
+public:
+ static constexpr u32 Magic = Common::MakeMagic('B', 'K', 'T', 'R');
+ static constexpr u32 Version = 1;
+
+ static constexpr size_t NodeSizeMin = 1_KiB;
+ static constexpr size_t NodeSizeMax = 512_KiB;
+
+public:
+ class Visitor;
+
+ struct Header {
+ u32 magic;
+ u32 version;
+ s32 entry_count;
+ s32 reserved;
+
+ void Format(s32 entry_count);
+ Result Verify() const;
+ };
+ static_assert(std::is_trivial_v<Header>);
+ static_assert(sizeof(Header) == 0x10);
+
+ struct NodeHeader {
+ s32 index;
+ s32 count;
+ s64 offset;
+
+ Result Verify(s32 node_index, size_t node_size, size_t entry_size) const;
+ };
+ static_assert(std::is_trivial_v<NodeHeader>);
+ static_assert(sizeof(NodeHeader) == 0x10);
+
+ struct Offsets {
+ s64 start_offset;
+ s64 end_offset;
+
+ constexpr bool IsInclude(s64 offset) const {
+ return this->start_offset <= offset && offset < this->end_offset;
+ }
+
+ constexpr bool IsInclude(s64 offset, s64 size) const {
+ return size > 0 && this->start_offset <= offset && size <= (this->end_offset - offset);
+ }
+ };
+ static_assert(std::is_trivial_v<Offsets>);
+ static_assert(sizeof(Offsets) == 0x10);
+
+ struct OffsetCache {
+ Offsets offsets;
+ std::mutex mutex;
+ bool is_initialized;
+
+ OffsetCache() : offsets{-1, -1}, mutex(), is_initialized(false) {}
+ };
+
+ class ContinuousReadingInfo {
+ public:
+ constexpr ContinuousReadingInfo() : m_read_size(), m_skip_count(), m_done() {}
+
+ constexpr void Reset() {
+ m_read_size = 0;
+ m_skip_count = 0;
+ m_done = false;
+ }
+
+ constexpr void SetSkipCount(s32 count) {
+ ASSERT(count >= 0);
+ m_skip_count = count;
+ }
+ constexpr s32 GetSkipCount() const {
+ return m_skip_count;
+ }
+ constexpr bool CheckNeedScan() {
+ return (--m_skip_count) <= 0;
+ }
+
+ constexpr void Done() {
+ m_read_size = 0;
+ m_done = true;
+ }
+ constexpr bool IsDone() const {
+ return m_done;
+ }
+
+ constexpr void SetReadSize(size_t size) {
+ m_read_size = size;
+ }
+ constexpr size_t GetReadSize() const {
+ return m_read_size;
+ }
+ constexpr bool CanDo() const {
+ return m_read_size > 0;
+ }
+
+ private:
+ size_t m_read_size;
+ s32 m_skip_count;
+ bool m_done;
+ };
+
+private:
+ class NodeBuffer {
+ YUZU_NON_COPYABLE(NodeBuffer);
+
+ public:
+ NodeBuffer() : m_header() {}
+
+ ~NodeBuffer() {
+ ASSERT(m_header == nullptr);
+ }
+
+ NodeBuffer(NodeBuffer&& rhs) : m_header(rhs.m_header) {
+ rhs.m_header = nullptr;
+ }
+
+ NodeBuffer& operator=(NodeBuffer&& rhs) {
+ if (this != std::addressof(rhs)) {
+ ASSERT(m_header == nullptr);
+
+ m_header = rhs.m_header;
+
+ rhs.m_header = nullptr;
+ }
+ return *this;
+ }
+
+ bool Allocate(size_t node_size) {
+ ASSERT(m_header == nullptr);
+
+ m_header = ::operator new(node_size, std::align_val_t{sizeof(s64)});
+
+ // ASSERT(Common::IsAligned(m_header, sizeof(s64)));
+
+ return m_header != nullptr;
+ }
+
+ void Free(size_t node_size) {
+ if (m_header) {
+ ::operator delete(m_header, std::align_val_t{sizeof(s64)});
+ m_header = nullptr;
+ }
+ }
+
+ void FillZero(size_t node_size) const {
+ if (m_header) {
+ std::memset(m_header, 0, node_size);
+ }
+ }
+
+ NodeHeader* Get() const {
+ return reinterpret_cast<NodeHeader*>(m_header);
+ }
+
+ NodeHeader* operator->() const {
+ return this->Get();
+ }
+
+ template <typename T>
+ T* Get() const {
+ static_assert(std::is_trivial_v<T>);
+ static_assert(sizeof(T) == sizeof(NodeHeader));
+ return reinterpret_cast<T*>(m_header);
+ }
+
+ private:
+ void* m_header;
+ };
+
+private:
+ static constexpr s32 GetEntryCount(size_t node_size, size_t entry_size) {
+ return static_cast<s32>((node_size - sizeof(NodeHeader)) / entry_size);
+ }
+
+ static constexpr s32 GetOffsetCount(size_t node_size) {
+ return static_cast<s32>((node_size - sizeof(NodeHeader)) / sizeof(s64));
+ }
+
+ static constexpr s32 GetEntrySetCount(size_t node_size, size_t entry_size, s32 entry_count) {
+ const s32 entry_count_per_node = GetEntryCount(node_size, entry_size);
+ return Common::DivideUp(entry_count, entry_count_per_node);
+ }
+
+ static constexpr s32 GetNodeL2Count(size_t node_size, size_t entry_size, s32 entry_count) {
+ const s32 offset_count_per_node = GetOffsetCount(node_size);
+ const s32 entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);
+
+ if (entry_set_count <= offset_count_per_node) {
+ return 0;
+ }
+
+ const s32 node_l2_count = Common::DivideUp(entry_set_count, offset_count_per_node);
+ ASSERT(node_l2_count <= offset_count_per_node);
+
+ return Common::DivideUp(entry_set_count - (offset_count_per_node - (node_l2_count - 1)),
+ offset_count_per_node);
+ }
+
+public:
+ BucketTree()
+ : m_node_storage(), m_entry_storage(), m_node_l1(), m_node_size(), m_entry_size(),
+ m_entry_count(), m_offset_count(), m_entry_set_count(), m_offset_cache() {}
+ ~BucketTree() {
+ this->Finalize();
+ }
+
+ Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, size_t node_size,
+ size_t entry_size, s32 entry_count);
+ void Initialize(size_t node_size, s64 end_offset);
+ void Finalize();
+
+ bool IsInitialized() const {
+ return m_node_size > 0;
+ }
+ bool IsEmpty() const {
+ return m_entry_size == 0;
+ }
+
+ Result Find(Visitor* visitor, s64 virtual_address);
+ Result InvalidateCache();
+
+ s32 GetEntryCount() const {
+ return m_entry_count;
+ }
+
+ Result GetOffsets(Offsets* out) {
+ // Ensure we have an offset cache.
+ R_TRY(this->EnsureOffsetCache());
+
+ // Set the output.
+ *out = m_offset_cache.offsets;
+ R_SUCCEED();
+ }
+
+public:
+ static constexpr s64 QueryHeaderStorageSize() {
+ return sizeof(Header);
+ }
+
+ static constexpr s64 QueryNodeStorageSize(size_t node_size, size_t entry_size,
+ s32 entry_count) {
+ ASSERT(entry_size >= sizeof(s64));
+ ASSERT(node_size >= entry_size + sizeof(NodeHeader));
+ ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
+ ASSERT(Common::IsPowerOfTwo(node_size));
+ ASSERT(entry_count >= 0);
+
+ if (entry_count <= 0) {
+ return 0;
+ }
+ return (1 + GetNodeL2Count(node_size, entry_size, entry_count)) *
+ static_cast<s64>(node_size);
+ }
+
+ static constexpr s64 QueryEntryStorageSize(size_t node_size, size_t entry_size,
+ s32 entry_count) {
+ ASSERT(entry_size >= sizeof(s64));
+ ASSERT(node_size >= entry_size + sizeof(NodeHeader));
+ ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
+ ASSERT(Common::IsPowerOfTwo(node_size));
+ ASSERT(entry_count >= 0);
+
+ if (entry_count <= 0) {
+ return 0;
+ }
+ return GetEntrySetCount(node_size, entry_size, entry_count) * static_cast<s64>(node_size);
+ }
+
+private:
+ template <typename EntryType>
+ struct ContinuousReadingParam {
+ s64 offset;
+ size_t size;
+ NodeHeader entry_set;
+ s32 entry_index;
+ Offsets offsets;
+ EntryType entry;
+ };
+
+private:
+ template <typename EntryType>
+ Result ScanContinuousReading(ContinuousReadingInfo* out_info,
+ const ContinuousReadingParam<EntryType>& param) const;
+
+ bool IsExistL2() const {
+ return m_offset_count < m_entry_set_count;
+ }
+ bool IsExistOffsetL2OnL1() const {
+ return this->IsExistL2() && m_node_l1->count < m_offset_count;
+ }
+
+ s64 GetEntrySetIndex(s32 node_index, s32 offset_index) const {
+ return (m_offset_count - m_node_l1->count) + (m_offset_count * node_index) + offset_index;
+ }
+
+ Result EnsureOffsetCache();
+
+private:
+ mutable VirtualFile m_node_storage;
+ mutable VirtualFile m_entry_storage;
+ NodeBuffer m_node_l1;
+ size_t m_node_size;
+ size_t m_entry_size;
+ s32 m_entry_count;
+ s32 m_offset_count;
+ s32 m_entry_set_count;
+ OffsetCache m_offset_cache;
+};
+
+class BucketTree::Visitor {
+ YUZU_NON_COPYABLE(Visitor);
+ YUZU_NON_MOVEABLE(Visitor);
+
+public:
+ constexpr Visitor()
+ : m_tree(), m_entry(), m_entry_index(-1), m_entry_set_count(), m_entry_set{} {}
+ ~Visitor() {
+ if (m_entry != nullptr) {
+ ::operator delete(m_entry, m_tree->m_entry_size);
+ m_tree = nullptr;
+ m_entry = nullptr;
+ }
+ }
+
+ bool IsValid() const {
+ return m_entry_index >= 0;
+ }
+ bool CanMoveNext() const {
+ return this->IsValid() && (m_entry_index + 1 < m_entry_set.info.count ||
+ m_entry_set.info.index + 1 < m_entry_set_count);
+ }
+ bool CanMovePrevious() const {
+ return this->IsValid() && (m_entry_index > 0 || m_entry_set.info.index > 0);
+ }
+
+ Result MoveNext();
+ Result MovePrevious();
+
+ template <typename EntryType>
+ Result ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset, size_t size) const;
+
+ const void* Get() const {
+ ASSERT(this->IsValid());
+ return m_entry;
+ }
+
+ template <typename T>
+ const T* Get() const {
+ ASSERT(this->IsValid());
+ return reinterpret_cast<const T*>(m_entry);
+ }
+
+ const BucketTree* GetTree() const {
+ return m_tree;
+ }
+
+private:
+ Result Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets);
+
+ Result Find(s64 virtual_address);
+
+ Result FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index);
+ Result FindEntrySetWithBuffer(s32* out_index, s64 virtual_address, s32 node_index,
+ char* buffer);
+ Result FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address, s32 node_index);
+
+ Result FindEntry(s64 virtual_address, s32 entry_set_index);
+ Result FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index, char* buffer);
+ Result FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index);
+
+private:
+ friend class BucketTree;
+
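+ // An entry set begins with a NodeHeader; Info aliases it, with the node's offset read as 'end'
+ // and the first entry's offset as 'start'.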
+ union EntrySetHeader {
+ NodeHeader header;
+ struct Info {
+ s32 index;
+ s32 count;
+ s64 end;
+ s64 start;
+ } info;
+ static_assert(std::is_trivial_v<Info>);
+ };
+ static_assert(std::is_trivial_v<EntrySetHeader>);
+
+ const BucketTree* m_tree;
+ BucketTree::Offsets m_offsets;
+ void* m_entry;
+ s32 m_entry_index;
+ s32 m_entry_set_count;
+ EntrySetHeader m_entry_set;
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
new file mode 100644
index 000000000..030b2916b
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
@@ -0,0 +1,170 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+
+namespace FileSys {
+
+template <typename EntryType>
+Result BucketTree::ScanContinuousReading(ContinuousReadingInfo* out_info,
+ const ContinuousReadingParam<EntryType>& param) const {
+ static_assert(std::is_trivial_v<ContinuousReadingParam<EntryType>>);
+
+ // Validate our preconditions.
+ ASSERT(this->IsInitialized());
+ ASSERT(out_info != nullptr);
+ ASSERT(m_entry_size == sizeof(EntryType));
+
+ // Reset the output.
+ out_info->Reset();
+
+ // If there's nothing to read, we're done.
+ R_SUCCEED_IF(param.size == 0);
+
+ // If we're reading a fragment, we're done.
+ R_SUCCEED_IF(param.entry.IsFragment());
+
+ // Validate the first entry.
+ auto entry = param.entry;
+ auto cur_offset = param.offset;
+ R_UNLESS(entry.GetVirtualOffset() <= cur_offset, ResultOutOfRange);
+
+ // Create a pooled buffer for our scan.
+ PooledBuffer pool(m_node_size, 1);
+ char* buffer = nullptr;
+
+ s64 entry_storage_size = m_entry_storage->GetSize();
+
+ // Read the node.
+ if (m_node_size <= pool.GetSize()) {
+ buffer = pool.GetBuffer();
+ const auto ofs = param.entry_set.index * static_cast<s64>(m_node_size);
+ R_UNLESS(m_node_size + ofs <= static_cast<size_t>(entry_storage_size),
+ ResultInvalidBucketTreeNodeEntryCount);
+
+ m_entry_storage->Read(reinterpret_cast<u8*>(buffer), m_node_size, ofs);
+ }
+
+ // Calculate extents.
+ const auto end_offset = cur_offset + static_cast<s64>(param.size);
+ s64 phys_offset = entry.GetPhysicalOffset();
+
+ // Start merge tracking.
+ s64 merge_size = 0;
+ s64 readable_size = 0;
+ bool merged = false;
+
+ // Iterate.
+ auto entry_index = param.entry_index;
+ for (const auto entry_count = param.entry_set.count; entry_index < entry_count; ++entry_index) {
+ // If we're past the end, we're done.
+ if (end_offset <= cur_offset) {
+ break;
+ }
+
+ // Validate the entry offset.
+ const auto entry_offset = entry.GetVirtualOffset();
+ R_UNLESS(entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);
+
+ // Get the next entry.
+ EntryType next_entry = {};
+ s64 next_entry_offset;
+
+ if (entry_index + 1 < entry_count) {
+ if (buffer != nullptr) {
+ const auto ofs = impl::GetBucketTreeEntryOffset(0, m_entry_size, entry_index + 1);
+ std::memcpy(std::addressof(next_entry), buffer + ofs, m_entry_size);
+ } else {
+ const auto ofs = impl::GetBucketTreeEntryOffset(param.entry_set.index, m_node_size,
+ m_entry_size, entry_index + 1);
+ m_entry_storage->ReadObject(std::addressof(next_entry), ofs);
+ }
+
+ next_entry_offset = next_entry.GetVirtualOffset();
+ R_UNLESS(param.offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
+ } else {
+ next_entry_offset = param.entry_set.offset;
+ }
+
+ // Validate the next entry offset.
+ R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);
+
+ // Determine how much data there is.
+ const auto data_size = next_entry_offset - cur_offset;
+ ASSERT(data_size > 0);
+
+ // Determine how much data we should read.
+ const auto remaining_size = end_offset - cur_offset;
+ const size_t read_size = static_cast<size_t>(std::min(data_size, remaining_size));
+ ASSERT(read_size <= param.size);
+
+ // Update our merge tracking.
+ if (entry.IsFragment()) {
+ // If we can't merge, stop looping.
+ if (EntryType::FragmentSizeMax <= read_size || remaining_size <= data_size) {
+ break;
+ }
+
+ // Otherwise, add the current size to the merge size.
+ merge_size += read_size;
+ } else {
+ // If we can't merge, stop looping.
+ if (phys_offset != entry.GetPhysicalOffset()) {
+ break;
+ }
+
+ // Add the size to the readable amount.
+ readable_size += merge_size + read_size;
+ ASSERT(readable_size <= static_cast<s64>(param.size));
+
+ // Update whether we've merged.
+ merged |= merge_size > 0;
+ merge_size = 0;
+ }
+
+ // Advance.
+ cur_offset += read_size;
+ ASSERT(cur_offset <= end_offset);
+
+ phys_offset += next_entry_offset - entry_offset;
+ entry = next_entry;
+ }
+
+ // If we merged, set our readable size.
+ if (merged) {
+ out_info->SetReadSize(static_cast<size_t>(readable_size));
+ }
+ out_info->SetSkipCount(entry_index - param.entry_index);
+
+ R_SUCCEED();
+}
+
+template <typename EntryType>
+Result BucketTree::Visitor::ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset,
+ size_t size) const {
+ static_assert(std::is_trivial_v<EntryType>);
+ ASSERT(this->IsValid());
+
+ // Create our parameters.
+ ContinuousReadingParam<EntryType> param = {
+ .offset = offset,
+ .size = size,
+ .entry_set = m_entry_set.header,
+ .entry_index = m_entry_index,
+ .offsets{},
+ .entry{},
+ };
+ std::memcpy(std::addressof(param.offsets), std::addressof(m_offsets),
+ sizeof(BucketTree::Offsets));
+ std::memcpy(std::addressof(param.entry), m_entry, sizeof(EntryType));
+
+ // Scan.
+ R_RETURN(m_tree->ScanContinuousReading<EntryType>(out_info, param));
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h b/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h
new file mode 100644
index 000000000..5503613fc
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h
@@ -0,0 +1,110 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+
+namespace FileSys::impl {
+
+class SafeValue {
+public:
+ static s64 GetInt64(const void* ptr) {
+ s64 value;
+ std::memcpy(std::addressof(value), ptr, sizeof(s64));
+ return value;
+ }
+
+ static s64 GetInt64(const s64* ptr) {
+ return GetInt64(static_cast<const void*>(ptr));
+ }
+
+ static s64 GetInt64(const s64& v) {
+ return GetInt64(std::addressof(v));
+ }
+
+ static void SetInt64(void* dst, const void* src) {
+ std::memcpy(dst, src, sizeof(s64));
+ }
+
+ static void SetInt64(void* dst, const s64* src) {
+ return SetInt64(dst, static_cast<const void*>(src));
+ }
+
+ static void SetInt64(void* dst, const s64& v) {
+ return SetInt64(dst, std::addressof(v));
+ }
+};
+
+template <typename IteratorType>
+struct BucketTreeNode {
+ using Header = BucketTree::NodeHeader;
+
+ Header header;
+
+ s32 GetCount() const {
+ return this->header.count;
+ }
+
+ void* GetArray() {
+ return std::addressof(this->header) + 1;
+ }
+ template <typename T>
+ T* GetArray() {
+ return reinterpret_cast<T*>(this->GetArray());
+ }
+ const void* GetArray() const {
+ return std::addressof(this->header) + 1;
+ }
+ template <typename T>
+ const T* GetArray() const {
+ return reinterpret_cast<const T*>(this->GetArray());
+ }
+
+ s64 GetBeginOffset() const {
+ return *this->GetArray<s64>();
+ }
+ s64 GetEndOffset() const {
+ return this->header.offset;
+ }
+
+ IteratorType GetBegin() {
+ return IteratorType(this->GetArray<s64>());
+ }
+ IteratorType GetEnd() {
+ return IteratorType(this->GetArray<s64>()) + this->header.count;
+ }
+ IteratorType GetBegin() const {
+ return IteratorType(this->GetArray<s64>());
+ }
+ IteratorType GetEnd() const {
+ return IteratorType(this->GetArray<s64>()) + this->header.count;
+ }
+
+ IteratorType GetBegin(size_t entry_size) {
+ return IteratorType(this->GetArray(), entry_size);
+ }
+ IteratorType GetEnd(size_t entry_size) {
+ return IteratorType(this->GetArray(), entry_size) + this->header.count;
+ }
+ IteratorType GetBegin(size_t entry_size) const {
+ return IteratorType(this->GetArray(), entry_size);
+ }
+ IteratorType GetEnd(size_t entry_size) const {
+ return IteratorType(this->GetArray(), entry_size) + this->header.count;
+ }
+};
+
+constexpr inline s64 GetBucketTreeEntryOffset(s64 entry_set_offset, size_t entry_size,
+ s32 entry_index) {
+ return entry_set_offset + sizeof(BucketTree::NodeHeader) +
+ entry_index * static_cast<s64>(entry_size);
+}
+
+constexpr inline s64 GetBucketTreeEntryOffset(s32 entry_set_index, size_t node_size,
+ size_t entry_size, s32 entry_index) {
+ return GetBucketTreeEntryOffset(entry_set_index * static_cast<s64>(node_size), entry_size,
+ entry_index);
+}
+
+} // namespace FileSys::impl
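The two GetBucketTreeEntryOffset() overloads are layered offset arithmetic: skip to the entry set, skip its node header, then index into the entry array. A worked example, illustrative only, using the 16 KiB node size and 0x18-byte entry that CompressedStorage adopts later in this change:

    // Entry 5 of entry set 2, with node_size = 0x4000 and entry_size = 0x18.
    constexpr s64 example_offset =
        FileSys::impl::GetBucketTreeEntryOffset(2, 0x4000, 0x18, 5);
    static_assert(example_offset ==
                  2 * 0x4000 + static_cast<s64>(sizeof(FileSys::BucketTree::NodeHeader)) + 5 * 0x18);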
diff --git a/src/core/file_sys/fssystem/fssystem_compressed_storage.h b/src/core/file_sys/fssystem/fssystem_compressed_storage.h
new file mode 100644
index 000000000..33d93938e
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compressed_storage.h
@@ -0,0 +1,963 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/literals.h"
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+#include "core/file_sys/fssystem/fssystem_compression_common.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+#include "core/file_sys/vfs.h"
+
+namespace FileSys {
+
+using namespace Common::Literals;
+
+class CompressedStorage : public IReadOnlyStorage {
+ YUZU_NON_COPYABLE(CompressedStorage);
+ YUZU_NON_MOVEABLE(CompressedStorage);
+
+public:
+ static constexpr size_t NodeSize = 16_KiB;
+
+ struct Entry {
+ s64 virt_offset;
+ s64 phys_offset;
+ CompressionType compression_type;
+ s32 phys_size;
+
+ s64 GetPhysicalSize() const {
+ return this->phys_size;
+ }
+ };
+ static_assert(std::is_trivial_v<Entry>);
+ static_assert(sizeof(Entry) == 0x18);
+
+public:
+ static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
+ return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
+ }
+
+ static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
+ return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
+ }
+
+private:
+ class CompressedStorageCore {
+ YUZU_NON_COPYABLE(CompressedStorageCore);
+ YUZU_NON_MOVEABLE(CompressedStorageCore);
+
+ public:
+ CompressedStorageCore() : m_table(), m_data_storage() {}
+
+ ~CompressedStorageCore() {
+ this->Finalize();
+ }
+
+ public:
+ Result Initialize(VirtualFile data_storage, VirtualFile node_storage,
+ VirtualFile entry_storage, s32 bktr_entry_count, size_t block_size_max,
+ size_t continuous_reading_size_max,
+ GetDecompressorFunction get_decompressor) {
+ // Check pre-conditions.
+ ASSERT(0 < block_size_max);
+ ASSERT(block_size_max <= continuous_reading_size_max);
+ ASSERT(get_decompressor != nullptr);
+
+ // Initialize our entry table.
+ R_TRY(m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry),
+ bktr_entry_count));
+
+ // Set our other fields.
+ m_block_size_max = block_size_max;
+ m_continuous_reading_size_max = continuous_reading_size_max;
+ m_data_storage = data_storage;
+ m_get_decompressor_function = get_decompressor;
+
+ R_SUCCEED();
+ }
+
+ void Finalize() {
+ if (this->IsInitialized()) {
+ m_table.Finalize();
+ m_data_storage = VirtualFile();
+ }
+ }
+
+ VirtualFile GetDataStorage() {
+ return m_data_storage;
+ }
+
+ Result GetDataStorageSize(s64* out) {
+ // Check pre-conditions.
+ ASSERT(out != nullptr);
+
+ // Get size.
+ *out = m_data_storage->GetSize();
+
+ R_SUCCEED();
+ }
+
+ BucketTree& GetEntryTable() {
+ return m_table;
+ }
+
+ Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count,
+ s64 offset, s64 size) {
+ // Check pre-conditions.
+ ASSERT(offset >= 0);
+ ASSERT(size >= 0);
+ ASSERT(this->IsInitialized());
+
+ // Check that we can output the count.
+ R_UNLESS(out_read_count != nullptr, ResultNullptrArgument);
+
+ // Check that we have anything to read at all.
+ R_SUCCEED_IF(size == 0);
+
+ // Check that either we have a buffer, or this is to determine how many we need.
+ if (max_entry_count != 0) {
+ R_UNLESS(out_entries != nullptr, ResultNullptrArgument);
+ }
+
+ // Get the table offsets.
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ // Validate arguments.
+ R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
+
+ // Find the offset in our tree.
+ BucketTree::Visitor visitor;
+ R_TRY(m_table.Find(std::addressof(visitor), offset));
+ {
+ const auto entry_offset = visitor.Get<Entry>()->virt_offset;
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
+ ResultUnexpectedInCompressedStorageA);
+ }
+
+ // Get the entries.
+ const auto end_offset = offset + size;
+ s32 read_count = 0;
+ while (visitor.Get<Entry>()->virt_offset < end_offset) {
+ // If we should be setting the output, do so.
+ if (max_entry_count != 0) {
+ // Ensure we only read as many entries as we can.
+ if (read_count >= max_entry_count) {
+ break;
+ }
+
+ // Set the current output entry.
+ out_entries[read_count] = *visitor.Get<Entry>();
+ }
+
+ // Increase the read count.
+ ++read_count;
+
+ // If we're at the end, we're done.
+ if (!visitor.CanMoveNext()) {
+ break;
+ }
+
+ // Move to the next entry.
+ R_TRY(visitor.MoveNext());
+ }
+
+ // Set the output read count.
+ *out_read_count = read_count;
+ R_SUCCEED();
+ }
+
+ Result GetSize(s64* out) {
+ // Check pre-conditions.
+ ASSERT(out != nullptr);
+
+ // Get our table offsets.
+ BucketTree::Offsets offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(offsets)));
+
+ // Set the output.
+ *out = offsets.end_offset;
+ R_SUCCEED();
+ }
+
+ Result OperatePerEntry(s64 offset, s64 size, auto f) {
+ // Check pre-conditions.
+ ASSERT(offset >= 0);
+ ASSERT(size >= 0);
+ ASSERT(this->IsInitialized());
+
+ // Succeed if there's nothing to operate on.
+ R_SUCCEED_IF(size == 0);
+
+ // Get the table offsets.
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ // Validate arguments.
+ R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
+
+ // Find the offset in our tree.
+ BucketTree::Visitor visitor;
+ R_TRY(m_table.Find(std::addressof(visitor), offset));
+ {
+ const auto entry_offset = visitor.Get<Entry>()->virt_offset;
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
+ ResultUnexpectedInCompressedStorageA);
+ }
+
+ // Prepare to operate in chunks.
+ auto cur_offset = offset;
+ const auto end_offset = offset + static_cast<s64>(size);
+
+ while (cur_offset < end_offset) {
+ // Get the current entry.
+ const auto cur_entry = *visitor.Get<Entry>();
+
+ // Get and validate the entry's offset.
+ const auto cur_entry_offset = cur_entry.virt_offset;
+ R_UNLESS(cur_entry_offset <= cur_offset, ResultUnexpectedInCompressedStorageA);
+
+ // Get and validate the next entry offset.
+ s64 next_entry_offset;
+ if (visitor.CanMoveNext()) {
+ R_TRY(visitor.MoveNext());
+ next_entry_offset = visitor.Get<Entry>()->virt_offset;
+ R_UNLESS(table_offsets.IsInclude(next_entry_offset),
+ ResultUnexpectedInCompressedStorageA);
+ } else {
+ next_entry_offset = table_offsets.end_offset;
+ }
+ R_UNLESS(cur_offset < next_entry_offset, ResultUnexpectedInCompressedStorageA);
+
+ // Get the offset of the entry in the data we read.
+ const auto data_offset = cur_offset - cur_entry_offset;
+ const auto data_size = (next_entry_offset - cur_entry_offset);
+ ASSERT(data_size > 0);
+
+ // Determine how much is left.
+ const auto remaining_size = end_offset - cur_offset;
+ const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset);
+ ASSERT(cur_size <= size);
+
+ // Get the data storage size.
+ s64 storage_size = m_data_storage->GetSize();
+
+ // Check that our read remains naively physically in bounds.
+ R_UNLESS(0 <= cur_entry.phys_offset && cur_entry.phys_offset <= storage_size,
+ ResultUnexpectedInCompressedStorageC);
+
+ // If we have any compression, verify that we remain physically in bounds.
+ if (cur_entry.compression_type != CompressionType::None) {
+ R_UNLESS(cur_entry.phys_offset + cur_entry.GetPhysicalSize() <= storage_size,
+ ResultUnexpectedInCompressedStorageC);
+ }
+
+ // Check that block alignment requirements are met.
+ if (CompressionTypeUtility::IsBlockAlignmentRequired(cur_entry.compression_type)) {
+ R_UNLESS(Common::IsAligned(cur_entry.phys_offset, CompressionBlockAlignment),
+ ResultUnexpectedInCompressedStorageA);
+ }
+
+ // Invoke the operator.
+ bool is_continuous = true;
+ R_TRY(
+ f(std::addressof(is_continuous), cur_entry, data_size, data_offset, cur_size));
+
+ // If not continuous, we're done.
+ if (!is_continuous) {
+ break;
+ }
+
+ // Advance.
+ cur_offset += cur_size;
+ }
+
+ R_SUCCEED();
+ }
+
+ public:
+ using ReadImplFunction = std::function<Result(void*, size_t)>;
+ using ReadFunction = std::function<Result(size_t, const ReadImplFunction&)>;
+
+ public:
+ Result Read(s64 offset, s64 size, const ReadFunction& read_func) {
+ // Check pre-conditions.
+ ASSERT(offset >= 0);
+ ASSERT(this->IsInitialized());
+
+ // Succeed immediately if we have nothing to read.
+ R_SUCCEED_IF(size == 0);
+
+ // Declare tracking state and the lambda that performs any pending physical read.
+ constexpr int EntriesCountMax = 0x80;
+ struct Entries {
+ CompressionType compression_type;
+ u32 gap_from_prev;
+ u32 physical_size;
+ u32 virtual_size;
+ };
+ std::array<Entries, EntriesCountMax> entries;
+ s32 entry_count = 0;
+ Entry prev_entry = {
+ .virt_offset = -1,
+ .phys_offset{},
+ .compression_type{},
+ .phys_size{},
+ };
+ bool will_allocate_pooled_buffer = false;
+ s64 required_access_physical_offset = 0;
+ s64 required_access_physical_size = 0;
+
+ auto PerformRequiredRead = [&]() -> Result {
+ // If there are no entries, we have nothing to do.
+ R_SUCCEED_IF(entry_count == 0);
+
+ // Get the remaining size in a convenient form.
+ const size_t total_required_size =
+ static_cast<size_t>(required_access_physical_size);
+
+ // Perform the read based on whether we need to allocate a buffer.
+ if (will_allocate_pooled_buffer) {
+ // Allocate a pooled buffer.
+ PooledBuffer pooled_buffer;
+ if (pooled_buffer.GetAllocatableSizeMax() >= total_required_size) {
+ pooled_buffer.Allocate(total_required_size, m_block_size_max);
+ } else {
+ pooled_buffer.AllocateParticularlyLarge(
+ std::min<size_t>(
+ total_required_size,
+ PooledBuffer::GetAllocatableParticularlyLargeSizeMax()),
+ m_block_size_max);
+ }
+
+ // Read each of the entries.
+ for (s32 entry_idx = 0; entry_idx < entry_count; ++entry_idx) {
+ // Determine the current read size.
+ bool will_use_pooled_buffer = false;
+ const size_t cur_read_size = [&]() -> size_t {
+ if (const size_t target_entry_size =
+ static_cast<size_t>(entries[entry_idx].physical_size) +
+ static_cast<size_t>(entries[entry_idx].gap_from_prev);
+ target_entry_size <= pooled_buffer.GetSize()) {
+ // We'll be using the pooled buffer.
+ will_use_pooled_buffer = true;
+
+ // Determine how much we can read.
+ const size_t max_size = std::min<size_t>(
+ required_access_physical_size, pooled_buffer.GetSize());
+
+ size_t read_size = 0;
+ for (auto n = entry_idx; n < entry_count; ++n) {
+ const size_t cur_entry_size =
+ static_cast<size_t>(entries[n].physical_size) +
+ static_cast<size_t>(entries[n].gap_from_prev);
+ if (read_size + cur_entry_size > max_size) {
+ break;
+ }
+
+ read_size += cur_entry_size;
+ }
+
+ return read_size;
+ } else {
+ // If we don't fit, we must be uncompressed.
+ ASSERT(entries[entry_idx].compression_type ==
+ CompressionType::None);
+
+ // We can perform the whole of an uncompressed read directly.
+ return entries[entry_idx].virtual_size;
+ }
+ }();
+
+ // Perform the read based on whether or not we'll use the pooled buffer.
+ if (will_use_pooled_buffer) {
+ // Read the compressed data into the pooled buffer.
+ auto* const buffer = pooled_buffer.GetBuffer();
+ m_data_storage->Read(reinterpret_cast<u8*>(buffer), cur_read_size,
+ required_access_physical_offset);
+
+ // Decompress the data.
+ size_t buffer_offset;
+ for (buffer_offset = 0;
+ entry_idx < entry_count &&
+ ((static_cast<size_t>(entries[entry_idx].physical_size) +
+ static_cast<size_t>(entries[entry_idx].gap_from_prev)) == 0 ||
+ buffer_offset < cur_read_size);
+ buffer_offset += entries[entry_idx++].physical_size) {
+ // Advance by the relevant gap.
+ buffer_offset += entries[entry_idx].gap_from_prev;
+
+ const auto compression_type = entries[entry_idx].compression_type;
+ switch (compression_type) {
+ case CompressionType::None: {
+ // Check that we can remain within bounds.
+ ASSERT(buffer_offset + entries[entry_idx].virtual_size <=
+ cur_read_size);
+
+ // Perform no decompression.
+ R_TRY(read_func(
+ entries[entry_idx].virtual_size,
+ [&](void* dst, size_t dst_size) -> Result {
+ // Check that the size is valid.
+ ASSERT(dst_size == entries[entry_idx].virtual_size);
+
+ // We have no compression, so just copy the data
+ // out.
+ std::memcpy(dst, buffer + buffer_offset,
+ entries[entry_idx].virtual_size);
+ R_SUCCEED();
+ }));
+
+ break;
+ }
+ case CompressionType::Zeros: {
+ // Check that we can remain within bounds.
+ ASSERT(buffer_offset <= cur_read_size);
+
+ // Zero the memory.
+ R_TRY(read_func(
+ entries[entry_idx].virtual_size,
+ [&](void* dst, size_t dst_size) -> Result {
+ // Check that the size is valid.
+ ASSERT(dst_size == entries[entry_idx].virtual_size);
+
+ // The data is zeroes, so zero the buffer.
+ std::memset(dst, 0, entries[entry_idx].virtual_size);
+ R_SUCCEED();
+ }));
+
+ break;
+ }
+ default: {
+ // Check that we can remain within bounds.
+ ASSERT(buffer_offset + entries[entry_idx].physical_size <=
+ cur_read_size);
+
+ // Get the decompressor.
+ const auto decompressor =
+ this->GetDecompressor(compression_type);
+ R_UNLESS(decompressor != nullptr,
+ ResultUnexpectedInCompressedStorageB);
+
+ // Decompress the data.
+ R_TRY(read_func(entries[entry_idx].virtual_size,
+ [&](void* dst, size_t dst_size) -> Result {
+ // Check that the size is valid.
+ ASSERT(dst_size ==
+ entries[entry_idx].virtual_size);
+
+ // Perform the decompression.
+ R_RETURN(decompressor(
+ dst, entries[entry_idx].virtual_size,
+ buffer + buffer_offset,
+ entries[entry_idx].physical_size));
+ }));
+
+ break;
+ }
+ }
+ }
+
+ // Check that we processed the correct amount of data.
+ ASSERT(buffer_offset == cur_read_size);
+ } else {
+ // Account for the gap from the previous entry.
+ required_access_physical_offset += entries[entry_idx].gap_from_prev;
+ required_access_physical_size -= entries[entry_idx].gap_from_prev;
+
+ // We don't need the buffer (as the data is uncompressed), so just
+ // execute the read.
+ R_TRY(
+ read_func(cur_read_size, [&](void* dst, size_t dst_size) -> Result {
+ // Check that the size is valid.
+ ASSERT(dst_size == cur_read_size);
+
+ // Perform the read.
+ m_data_storage->Read(reinterpret_cast<u8*>(dst), cur_read_size,
+ required_access_physical_offset);
+
+ R_SUCCEED();
+ }));
+ }
+
+ // Advance on.
+ required_access_physical_offset += cur_read_size;
+ required_access_physical_size -= cur_read_size;
+ }
+
+ // Verify that we have nothing remaining to read.
+ ASSERT(required_access_physical_size == 0);
+
+ R_SUCCEED();
+ } else {
+ // We don't need a buffer, so just execute the read.
+ R_TRY(read_func(total_required_size, [&](void* dst, size_t dst_size) -> Result {
+ // Check that the size is valid.
+ ASSERT(dst_size == total_required_size);
+
+ // Perform the read.
+ m_data_storage->Read(reinterpret_cast<u8*>(dst), total_required_size,
+ required_access_physical_offset);
+
+ R_SUCCEED();
+ }));
+ }
+
+ R_SUCCEED();
+ };
+
+ R_TRY(this->OperatePerEntry(
+ offset, size,
+ [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
+ s64 data_offset, s64 read_size) -> Result {
+ // Determine the physical extents.
+ s64 physical_offset, physical_size;
+ if (CompressionTypeUtility::IsRandomAccessible(entry.compression_type)) {
+ physical_offset = entry.phys_offset + data_offset;
+ physical_size = read_size;
+ } else {
+ physical_offset = entry.phys_offset;
+ physical_size = entry.GetPhysicalSize();
+ }
+
+ // If we have a pending data storage operation, perform it if we have to.
+ const s64 required_access_physical_end =
+ required_access_physical_offset + required_access_physical_size;
+ if (required_access_physical_size > 0) {
+ const bool required_by_gap =
+ !(required_access_physical_end <= physical_offset &&
+ physical_offset <= Common::AlignUp(required_access_physical_end,
+ CompressionBlockAlignment));
+ const bool required_by_continuous_size =
+ ((physical_size + physical_offset) - required_access_physical_end) +
+ required_access_physical_size >
+ static_cast<s64>(m_continuous_reading_size_max);
+ const bool required_by_entry_count = entry_count == EntriesCountMax;
+ if (required_by_gap || required_by_continuous_size ||
+ required_by_entry_count) {
+ // Check that our planned access is sane.
+ ASSERT(!will_allocate_pooled_buffer ||
+ required_access_physical_size <=
+ static_cast<s64>(m_continuous_reading_size_max));
+
+ // Perform the required read.
+ const Result rc = PerformRequiredRead();
+ if (R_FAILED(rc)) {
+ R_THROW(rc);
+ }
+
+ // Reset our requirements.
+ prev_entry.virt_offset = -1;
+ required_access_physical_size = 0;
+ entry_count = 0;
+ will_allocate_pooled_buffer = false;
+ }
+ }
+
+ // Sanity check that we're within bounds on entries.
+ ASSERT(entry_count < EntriesCountMax);
+
+ // Determine if a buffer allocation is needed.
+ if (entry.compression_type != CompressionType::None ||
+ (prev_entry.virt_offset >= 0 &&
+ entry.virt_offset - prev_entry.virt_offset !=
+ entry.phys_offset - prev_entry.phys_offset)) {
+ will_allocate_pooled_buffer = true;
+ }
+
+ // If we need to access the data storage, update our required access parameters.
+ if (CompressionTypeUtility::IsDataStorageAccessRequired(
+ entry.compression_type)) {
+ // If the data is compressed, ensure the access is sane.
+ if (entry.compression_type != CompressionType::None) {
+ R_UNLESS(data_offset == 0, ResultInvalidOffset);
+ R_UNLESS(virtual_data_size == read_size, ResultInvalidSize);
+ R_UNLESS(entry.GetPhysicalSize() <= static_cast<s64>(m_block_size_max),
+ ResultUnexpectedInCompressedStorageD);
+ }
+
+ // Update the required access parameters.
+ s64 gap_from_prev;
+ if (required_access_physical_size > 0) {
+ gap_from_prev = physical_offset - required_access_physical_end;
+ } else {
+ gap_from_prev = 0;
+ required_access_physical_offset = physical_offset;
+ }
+ required_access_physical_size += physical_size + gap_from_prev;
+
+ // Create an entry to access the data storage.
+ entries[entry_count++] = {
+ .compression_type = entry.compression_type,
+ .gap_from_prev = static_cast<u32>(gap_from_prev),
+ .physical_size = static_cast<u32>(physical_size),
+ .virtual_size = static_cast<u32>(read_size),
+ };
+ } else {
+ // Verify that we're allowed to be operating on the non-data-storage-access
+ // type.
+ R_UNLESS(entry.compression_type == CompressionType::Zeros,
+ ResultUnexpectedInCompressedStorageB);
+
+ // If we have entries, create a fake entry for the zero region.
+ if (entry_count != 0) {
+ // We need to have a physical size.
+ R_UNLESS(entry.GetPhysicalSize() != 0,
+ ResultUnexpectedInCompressedStorageD);
+
+ // Create a fake entry.
+ entries[entry_count++] = {
+ .compression_type = CompressionType::Zeros,
+ .gap_from_prev = 0,
+ .physical_size = 0,
+ .virtual_size = static_cast<u32>(read_size),
+ };
+ } else {
+ // We have no entries, so we can just perform the read.
+ const Result rc =
+ read_func(static_cast<size_t>(read_size),
+ [&](void* dst, size_t dst_size) -> Result {
+ // Check the space we should zero is correct.
+ ASSERT(dst_size == static_cast<size_t>(read_size));
+
+ // Zero the memory.
+ std::memset(dst, 0, read_size);
+ R_SUCCEED();
+ });
+ if (R_FAILED(rc)) {
+ R_THROW(rc);
+ }
+ }
+ }
+
+ // Set the previous entry.
+ prev_entry = entry;
+
+ // We're continuous.
+ *out_continuous = true;
+ R_SUCCEED();
+ }));
+
+ // If we still have a pending access, perform it.
+ if (required_access_physical_size != 0) {
+ R_TRY(PerformRequiredRead());
+ }
+
+ R_SUCCEED();
+ }
+
+ private:
+ DecompressorFunction GetDecompressor(CompressionType type) const {
+ // Check that we can get a decompressor for the type.
+ if (CompressionTypeUtility::IsUnknownType(type)) {
+ return nullptr;
+ }
+
+ // Get the decompressor.
+ return m_get_decompressor_function(type);
+ }
+
+ bool IsInitialized() const {
+ return m_table.IsInitialized();
+ }
+
+ private:
+ size_t m_block_size_max;
+ size_t m_continuous_reading_size_max;
+ BucketTree m_table;
+ VirtualFile m_data_storage;
+ GetDecompressorFunction m_get_decompressor_function;
+ };
+
+ class CacheManager {
+ YUZU_NON_COPYABLE(CacheManager);
+ YUZU_NON_MOVEABLE(CacheManager);
+
+ private:
+ struct AccessRange {
+ s64 virtual_offset;
+ s64 virtual_size;
+ u32 physical_size;
+ bool is_block_alignment_required;
+
+ s64 GetEndVirtualOffset() const {
+ return this->virtual_offset + this->virtual_size;
+ }
+ };
+ static_assert(std::is_trivial_v<AccessRange>);
+
+ public:
+ CacheManager() = default;
+
+ public:
+ Result Initialize(s64 storage_size, size_t cache_size_0, size_t cache_size_1,
+ size_t max_cache_entries) {
+ // Set our fields.
+ m_storage_size = storage_size;
+
+ R_SUCCEED();
+ }
+
+ Result Read(CompressedStorageCore& core, s64 offset, void* buffer, size_t size) {
+ // If we have nothing to read, succeed.
+ R_SUCCEED_IF(size == 0);
+
+ // Check that we have a buffer to read into.
+ R_UNLESS(buffer != nullptr, ResultNullptrArgument);
+
+ // Check that the read is in bounds.
+ R_UNLESS(offset <= m_storage_size, ResultInvalidOffset);
+
+ // Determine how much we can read.
+ const size_t read_size = std::min<size_t>(size, m_storage_size - offset);
+
+ // Create head/tail ranges.
+ AccessRange head_range = {};
+ AccessRange tail_range = {};
+ bool is_tail_set = false;
+
+ // Operate to determine the head range.
+ R_TRY(core.OperatePerEntry(
+ offset, 1,
+ [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
+ s64 data_offset, s64 data_read_size) -> Result {
+ // Set the head range.
+ head_range = {
+ .virtual_offset = entry.virt_offset,
+ .virtual_size = virtual_data_size,
+ .physical_size = static_cast<u32>(entry.phys_size),
+ .is_block_alignment_required =
+ CompressionTypeUtility::IsBlockAlignmentRequired(
+ entry.compression_type),
+ };
+
+ // If required, set the tail range.
+ if (static_cast<s64>(offset + read_size) <=
+ entry.virt_offset + virtual_data_size) {
+ tail_range = {
+ .virtual_offset = entry.virt_offset,
+ .virtual_size = virtual_data_size,
+ .physical_size = static_cast<u32>(entry.phys_size),
+ .is_block_alignment_required =
+ CompressionTypeUtility::IsBlockAlignmentRequired(
+ entry.compression_type),
+ };
+ is_tail_set = true;
+ }
+
+ // We only want to determine the head range, so we're not continuous.
+ *out_continuous = false;
+ R_SUCCEED();
+ }));
+
+ // If necessary, determine the tail range.
+ if (!is_tail_set) {
+ R_TRY(core.OperatePerEntry(
+ offset + read_size - 1, 1,
+ [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
+ s64 data_offset, s64 data_read_size) -> Result {
+ // Set the tail range.
+ tail_range = {
+ .virtual_offset = entry.virt_offset,
+ .virtual_size = virtual_data_size,
+ .physical_size = static_cast<u32>(entry.phys_size),
+ .is_block_alignment_required =
+ CompressionTypeUtility::IsBlockAlignmentRequired(
+ entry.compression_type),
+ };
+
+ // We only want to determine the tail range, so we're not continuous.
+ *out_continuous = false;
+ R_SUCCEED();
+ }));
+ }
+
+ // Begin performing the accesses.
+ s64 cur_offset = offset;
+ size_t cur_size = read_size;
+ char* cur_dst = static_cast<char*>(buffer);
+
+ // Determine our alignment.
+ const bool head_unaligned = head_range.is_block_alignment_required &&
+ (cur_offset != head_range.virtual_offset ||
+ static_cast<s64>(cur_size) < head_range.virtual_size);
+ const bool tail_unaligned = [&]() -> bool {
+ if (tail_range.is_block_alignment_required) {
+ if (static_cast<s64>(cur_size + cur_offset) ==
+ tail_range.GetEndVirtualOffset()) {
+ return false;
+ } else if (!head_unaligned) {
+ return true;
+ } else {
+ return head_range.GetEndVirtualOffset() <
+ static_cast<s64>(cur_size + cur_offset);
+ }
+ } else {
+ return false;
+ }
+ }();
+
+ // Determine start/end offsets.
+ const s64 start_offset =
+ head_range.is_block_alignment_required ? head_range.virtual_offset : cur_offset;
+ const s64 end_offset = tail_range.is_block_alignment_required
+ ? tail_range.GetEndVirtualOffset()
+ : cur_offset + cur_size;
+
+ // Perform the read.
+ bool is_burst_reading = false;
+ R_TRY(core.Read(
+ start_offset, end_offset - start_offset,
+ [&](size_t size_buffer_required,
+ const CompressedStorageCore::ReadImplFunction& read_impl) -> Result {
+ // Determine whether we're burst reading.
+ const AccessRange* unaligned_range = nullptr;
+ if (!is_burst_reading) {
+ // Check whether we're using head, tail, or none as unaligned.
+ if (head_unaligned && head_range.virtual_offset <= cur_offset &&
+ cur_offset < head_range.GetEndVirtualOffset()) {
+ unaligned_range = std::addressof(head_range);
+ } else if (tail_unaligned && tail_range.virtual_offset <= cur_offset &&
+ cur_offset < tail_range.GetEndVirtualOffset()) {
+ unaligned_range = std::addressof(tail_range);
+ } else {
+ is_burst_reading = true;
+ }
+ }
+ ASSERT((is_burst_reading ^ (unaligned_range != nullptr)));
+
+ // Perform reading by burst, or not.
+ if (is_burst_reading) {
+ // Check that the access is valid for burst reading.
+ ASSERT(size_buffer_required <= cur_size);
+
+ // Perform the read.
+ Result rc = read_impl(cur_dst, size_buffer_required);
+ if (R_FAILED(rc)) {
+ R_THROW(rc);
+ }
+
+ // Advance.
+ cur_dst += size_buffer_required;
+ cur_offset += size_buffer_required;
+ cur_size -= size_buffer_required;
+
+ // Determine whether we're going to continue burst reading.
+ const s64 offset_aligned =
+ tail_unaligned ? tail_range.virtual_offset : end_offset;
+ ASSERT(cur_offset <= offset_aligned);
+
+ if (offset_aligned <= cur_offset) {
+ is_burst_reading = false;
+ }
+ } else {
+ // We're not burst reading, so we have some unaligned range.
+ ASSERT(unaligned_range != nullptr);
+
+ // Check that the size is correct.
+ ASSERT(size_buffer_required ==
+ static_cast<size_t>(unaligned_range->virtual_size));
+
+ // Get a pooled buffer for our read.
+ PooledBuffer pooled_buffer;
+ pooled_buffer.Allocate(size_buffer_required, size_buffer_required);
+
+ // Perform read.
+ Result rc = read_impl(pooled_buffer.GetBuffer(), size_buffer_required);
+ if (R_FAILED(rc)) {
+ R_THROW(rc);
+ }
+
+ // Copy the data we read to the destination.
+ const size_t skip_size = cur_offset - unaligned_range->virtual_offset;
+ const size_t copy_size = std::min<size_t>(
+ cur_size, unaligned_range->GetEndVirtualOffset() - cur_offset);
+
+ std::memcpy(cur_dst, pooled_buffer.GetBuffer() + skip_size, copy_size);
+
+ // Advance.
+ cur_dst += copy_size;
+ cur_offset += copy_size;
+ cur_size -= copy_size;
+ }
+
+ R_SUCCEED();
+ }));
+
+ R_SUCCEED();
+ }
+
+ private:
+ s64 m_storage_size = 0;
+ };
+
+public:
+ CompressedStorage() = default;
+ virtual ~CompressedStorage() {
+ this->Finalize();
+ }
+
+ Result Initialize(VirtualFile data_storage, VirtualFile node_storage, VirtualFile entry_storage,
+ s32 bktr_entry_count, size_t block_size_max,
+ size_t continuous_reading_size_max, GetDecompressorFunction get_decompressor,
+ size_t cache_size_0, size_t cache_size_1, s32 max_cache_entries) {
+ // Initialize our core.
+ R_TRY(m_core.Initialize(data_storage, node_storage, entry_storage, bktr_entry_count,
+ block_size_max, continuous_reading_size_max, get_decompressor));
+
+ // Get our core size.
+ s64 core_size = 0;
+ R_TRY(m_core.GetSize(std::addressof(core_size)));
+
+ // Initialize our cache manager.
+ R_TRY(m_cache_manager.Initialize(core_size, cache_size_0, cache_size_1, max_cache_entries));
+
+ R_SUCCEED();
+ }
+
+ void Finalize() {
+ m_core.Finalize();
+ }
+
+ VirtualFile GetDataStorage() {
+ return m_core.GetDataStorage();
+ }
+
+ Result GetDataStorageSize(s64* out) {
+ R_RETURN(m_core.GetDataStorageSize(out));
+ }
+
+ Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count, s64 offset,
+ s64 size) {
+ R_RETURN(m_core.GetEntryList(out_entries, out_read_count, max_entry_count, offset, size));
+ }
+
+ BucketTree& GetEntryTable() {
+ return m_core.GetEntryTable();
+ }
+
+public:
+ virtual size_t GetSize() const override {
+ s64 ret{};
+ m_core.GetSize(&ret);
+ return ret;
+ }
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+ if (R_SUCCEEDED(m_cache_manager.Read(m_core, offset, buffer, size))) {
+ return size;
+ } else {
+ return 0;
+ }
+ }
+
+private:
+ mutable CompressedStorageCore m_core;
+ mutable CacheManager m_cache_manager;
+};
+
+} // namespace FileSys
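From a consumer's point of view, all of the machinery above collapses into the IReadOnlyStorage interface: Read() hands back decompressed bytes, or 0 if the cached read failed. A minimal sketch of such a caller, not part of the diff, assuming an already-initialized storage and yuzu's common typedefs:

    #include <vector>

    std::vector<u8> ReadDecompressed(FileSys::CompressedStorage& storage, size_t offset, size_t size) {
        std::vector<u8> data(size);
        // Read() forwards to the cache manager, which walks the bucket tree and
        // decompresses or zero-fills each entry as required.
        const size_t read = storage.Read(data.data(), data.size(), offset);
        data.resize(read);
        return data;
    }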
diff --git a/src/core/file_sys/fssystem/fssystem_compression_common.h b/src/core/file_sys/fssystem/fssystem_compression_common.h
new file mode 100644
index 000000000..266e0a7e5
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compression_common.h
@@ -0,0 +1,43 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/hle/result.h"
+
+namespace FileSys {
+
+enum class CompressionType : u8 {
+ None = 0,
+ Zeros = 1,
+ Two = 2,
+ Lz4 = 3,
+ Unknown = 4,
+};
+
+using DecompressorFunction = Result (*)(void*, size_t, const void*, size_t);
+using GetDecompressorFunction = DecompressorFunction (*)(CompressionType);
+
+constexpr s64 CompressionBlockAlignment = 0x10;
+
+namespace CompressionTypeUtility {
+
+constexpr bool IsBlockAlignmentRequired(CompressionType type) {
+ return type != CompressionType::None && type != CompressionType::Zeros;
+}
+
+constexpr bool IsDataStorageAccessRequired(CompressionType type) {
+ return type != CompressionType::Zeros;
+}
+
+constexpr bool IsRandomAccessible(CompressionType type) {
+ return type == CompressionType::None;
+}
+
+constexpr bool IsUnknownType(CompressionType type) {
+ return type >= CompressionType::Unknown;
+}
+
+} // namespace CompressionTypeUtility
+
+} // namespace FileSys
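These predicates fully determine how CompressedStorage treats each CompressionType: Zeros never touches the data storage, None is the only randomly accessible type, and everything other than None and Zeros must be block aligned. The checks below follow directly from the definitions above and are illustrative only:

    static_assert(FileSys::CompressionTypeUtility::IsBlockAlignmentRequired(FileSys::CompressionType::Lz4));
    static_assert(!FileSys::CompressionTypeUtility::IsBlockAlignmentRequired(FileSys::CompressionType::Zeros));
    static_assert(!FileSys::CompressionTypeUtility::IsDataStorageAccessRequired(FileSys::CompressionType::Zeros));
    static_assert(FileSys::CompressionTypeUtility::IsRandomAccessible(FileSys::CompressionType::None));
    static_assert(FileSys::CompressionTypeUtility::IsUnknownType(FileSys::CompressionType::Unknown));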
diff --git a/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp b/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp
new file mode 100644
index 000000000..ef552cefe
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp
@@ -0,0 +1,36 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/lz4_compression.h"
+#include "core/file_sys/fssystem/fssystem_compression_configuration.h"
+
+namespace FileSys {
+
+namespace {
+
+Result DecompressLz4(void* dst, size_t dst_size, const void* src, size_t src_size) {
+ auto result = Common::Compression::DecompressDataLZ4(dst, dst_size, src, src_size);
+ R_UNLESS(static_cast<size_t>(result) == dst_size, ResultUnexpectedInCompressedStorageC);
+ R_SUCCEED();
+}
+
+constexpr DecompressorFunction GetNcaDecompressorFunction(CompressionType type) {
+ switch (type) {
+ case CompressionType::Lz4:
+ return DecompressLz4;
+ default:
+ return nullptr;
+ }
+}
+
+} // namespace
+
+const NcaCompressionConfiguration& GetNcaCompressionConfiguration() {
+ static const NcaCompressionConfiguration configuration = {
+ .get_decompressor = GetNcaDecompressorFunction,
+ };
+
+ return configuration;
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_compression_configuration.h b/src/core/file_sys/fssystem/fssystem_compression_configuration.h
new file mode 100644
index 000000000..ec9b48e9a
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_compression_configuration.h
@@ -0,0 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
+
+namespace FileSys {
+
+const NcaCompressionConfiguration& GetNcaCompressionConfiguration();
+
+}
diff --git a/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp b/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp
new file mode 100644
index 000000000..a4f0cde28
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp
@@ -0,0 +1,65 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/crypto/aes_util.h"
+#include "core/crypto/key_manager.h"
+#include "core/file_sys/fssystem/fssystem_crypto_configuration.h"
+
+namespace FileSys {
+
+namespace {
+
+void GenerateKey(void* dst_key, size_t dst_key_size, const void* src_key, size_t src_key_size,
+ s32 key_type) {
+ if (key_type == static_cast<s32>(KeyType::ZeroKey)) {
+ std::memset(dst_key, 0, dst_key_size);
+ return;
+ }
+
+ if (key_type == static_cast<s32>(KeyType::InvalidKey) ||
+ key_type < static_cast<s32>(KeyType::ZeroKey) ||
+ key_type >= static_cast<s32>(KeyType::NcaExternalKey)) {
+ std::memset(dst_key, 0xFF, dst_key_size);
+ return;
+ }
+
+ const auto& instance = Core::Crypto::KeyManager::Instance();
+
+ if (key_type == static_cast<s32>(KeyType::NcaHeaderKey1) ||
+ key_type == static_cast<s32>(KeyType::NcaHeaderKey2)) {
+ const s32 key_index = static_cast<s32>(KeyType::NcaHeaderKey2) == key_type;
+ const auto key = instance.GetKey(Core::Crypto::S256KeyType::Header);
+ std::memcpy(dst_key, key.data() + key_index * 0x10, std::min(dst_key_size, key.size() / 2));
+ return;
+ }
+
+ const s32 key_generation =
+ std::max(key_type / NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount, 1) - 1;
+ const s32 key_index = key_type % NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount;
+
+ Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
+ instance.GetKey(Core::Crypto::S128KeyType::KeyArea, key_generation, key_index),
+ Core::Crypto::Mode::ECB);
+ cipher.Transcode(reinterpret_cast<const u8*>(src_key), src_key_size,
+ reinterpret_cast<u8*>(dst_key), Core::Crypto::Op::Decrypt);
+}
+
+} // namespace
+
+const NcaCryptoConfiguration& GetCryptoConfiguration() {
+ static const NcaCryptoConfiguration configuration = {
+ .header_1_sign_key_moduli{},
+ .header_1_sign_key_public_exponent{},
+ .key_area_encryption_key_source{},
+ .header_encryption_key_source{},
+ .header_encrypted_encryption_keys{},
+ .generate_key = GenerateKey,
+ .verify_sign1{},
+ .is_plaintext_header_available{},
+ .is_available_sw_key{},
+ };
+
+ return configuration;
+}
+
+} // namespace FileSys
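The generation/index split in GenerateKey() is easiest to follow with concrete numbers. The sketch below assumes KeyAreaEncryptionKeyIndexCount is 3 (application, ocean, system); the real constant lives in the NCA driver header elsewhere in this change, so treat that value as an assumption.

    #include <algorithm>

    // Mirrors the arithmetic in GenerateKey(), with the index count assumed to be 3.
    constexpr int ExampleKeyGeneration(int key_type, int index_count) {
        return std::max(key_type / index_count, 1) - 1;
    }

    static_assert(ExampleKeyGeneration(2, 3) == 0); // key_type 2: generation 0, key index 2 % 3 == 2
    static_assert(ExampleKeyGeneration(7, 3) == 1); // key_type 7: generation 1, key index 7 % 3 == 1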
diff --git a/src/core/file_sys/fssystem/fssystem_crypto_configuration.h b/src/core/file_sys/fssystem/fssystem_crypto_configuration.h
new file mode 100644
index 000000000..7fd9c5a8d
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_crypto_configuration.h
@@ -0,0 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
+
+namespace FileSys {
+
+const NcaCryptoConfiguration& GetCryptoConfiguration();
+
+}
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp
new file mode 100644
index 000000000..4a75b5308
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp
@@ -0,0 +1,127 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
+#include "core/file_sys/vfs_offset.h"
+
+namespace FileSys {
+
+HierarchicalIntegrityVerificationStorage::HierarchicalIntegrityVerificationStorage()
+ : m_data_size(-1) {
+ for (size_t i = 0; i < MaxLayers - 1; i++) {
+ m_verify_storages[i] = std::make_shared<IntegrityVerificationStorage>();
+ }
+}
+
+Result HierarchicalIntegrityVerificationStorage::Initialize(
+ const HierarchicalIntegrityVerificationInformation& info,
+ HierarchicalStorageInformation storage, int max_data_cache_entries, int max_hash_cache_entries,
+ s8 buffer_level) {
+ // Validate preconditions.
+ ASSERT(IntegrityMinLayerCount <= info.max_layers && info.max_layers <= IntegrityMaxLayerCount);
+
+ // Set member variables.
+ m_max_layers = info.max_layers;
+
+ // Initialize the top level verification storage.
+ m_verify_storages[0]->Initialize(storage[HierarchicalStorageInformation::MasterStorage],
+ storage[HierarchicalStorageInformation::Layer1Storage],
+ static_cast<s64>(1) << info.info[0].block_order, HashSize,
+ false);
+
+ // Ensure we don't leak state if further initialization goes wrong.
+ ON_RESULT_FAILURE {
+ m_verify_storages[0]->Finalize();
+ m_data_size = -1;
+ };
+
+ // Initialize the top level buffer storage.
+ m_buffer_storages[0] = m_verify_storages[0];
+ R_UNLESS(m_buffer_storages[0] != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Prepare to initialize the level storages.
+ s32 level = 0;
+
+ // Ensure we don't leak state if further initialization goes wrong.
+ ON_RESULT_FAILURE_2 {
+ m_verify_storages[level + 1]->Finalize();
+ for (; level > 0; --level) {
+ m_buffer_storages[level].reset();
+ m_verify_storages[level]->Finalize();
+ }
+ };
+
+ // Initialize the level storages.
+ for (; level < m_max_layers - 3; ++level) {
+ // Initialize the verification storage.
+ auto buffer_storage =
+ std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0);
+ m_verify_storages[level + 1]->Initialize(
+ std::move(buffer_storage), storage[level + 2],
+ static_cast<s64>(1) << info.info[level + 1].block_order,
+ static_cast<s64>(1) << info.info[level].block_order, false);
+
+ // Initialize the buffer storage.
+ m_buffer_storages[level + 1] = m_verify_storages[level + 1];
+ R_UNLESS(m_buffer_storages[level + 1] != nullptr,
+ ResultAllocationMemoryFailedAllocateShared);
+ }
+
+ // Initialize the final level storage.
+ {
+ // Initialize the verification storage.
+ auto buffer_storage =
+ std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0);
+ m_verify_storages[level + 1]->Initialize(
+ std::move(buffer_storage), storage[level + 2],
+ static_cast<s64>(1) << info.info[level + 1].block_order,
+ static_cast<s64>(1) << info.info[level].block_order, true);
+
+ // Initialize the buffer storage.
+ m_buffer_storages[level + 1] = m_verify_storages[level + 1];
+ R_UNLESS(m_buffer_storages[level + 1] != nullptr,
+ ResultAllocationMemoryFailedAllocateShared);
+ }
+
+ // Set the data size.
+ m_data_size = info.info[level + 1].size;
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+void HierarchicalIntegrityVerificationStorage::Finalize() {
+ if (m_data_size >= 0) {
+ m_data_size = 0;
+
+ for (s32 level = m_max_layers - 2; level >= 0; --level) {
+ m_buffer_storages[level].reset();
+ m_verify_storages[level]->Finalize();
+ }
+
+ m_data_size = -1;
+ }
+}
+
+size_t HierarchicalIntegrityVerificationStorage::Read(u8* buffer, size_t size,
+ size_t offset) const {
+ // Validate preconditions.
+ ASSERT(m_data_size >= 0);
+
+ // Succeed if zero-size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate arguments.
+ ASSERT(buffer != nullptr);
+
+ // Read the data.
+ return m_buffer_storages[m_max_layers - 2]->Read(buffer, size, offset);
+}
+
+size_t HierarchicalIntegrityVerificationStorage::GetSize() const {
+ return m_data_size;
+}
+
+} // namespace FileSys
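Each level's verification block size is a power of two taken from the metadata (1 << block_order). Worked arithmetic, assuming a typical block_order of 14; the actual orders come from HierarchicalIntegrityVerificationLevelInformation:

    constexpr s64 example_block_order = 14;                            // assumed value, not from the diff
    constexpr s64 example_block_size = s64{1} << example_block_order;  // 16 KiB
    constexpr s64 hashes_per_block = example_block_size / (256 / 8);   // HashSize is 32 bytes
    static_assert(hashes_per_block == 512);  // one 16 KiB hash block covers 512 blocks of the layer below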
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h
new file mode 100644
index 000000000..5cf697efe
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h
@@ -0,0 +1,164 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/alignment.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/fssystem/fs_types.h"
+#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h"
+#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h"
+#include "core/file_sys/vfs_offset.h"
+
+namespace FileSys {
+
+struct HierarchicalIntegrityVerificationLevelInformation {
+ Int64 offset;
+ Int64 size;
+ s32 block_order;
+ std::array<u8, 4> reserved;
+};
+static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationLevelInformation>);
+static_assert(sizeof(HierarchicalIntegrityVerificationLevelInformation) == 0x18);
+static_assert(alignof(HierarchicalIntegrityVerificationLevelInformation) == 0x4);
+
+struct HierarchicalIntegrityVerificationInformation {
+ u32 max_layers;
+ std::array<HierarchicalIntegrityVerificationLevelInformation, IntegrityMaxLayerCount - 1> info;
+ HashSalt seed;
+
+ s64 GetLayeredHashSize() const {
+ return this->info[this->max_layers - 2].offset;
+ }
+
+ s64 GetDataOffset() const {
+ return this->info[this->max_layers - 2].offset;
+ }
+
+ s64 GetDataSize() const {
+ return this->info[this->max_layers - 2].size;
+ }
+};
+static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationInformation>);
+
+struct HierarchicalIntegrityVerificationMetaInformation {
+ u32 magic;
+ u32 version;
+ u32 master_hash_size;
+ HierarchicalIntegrityVerificationInformation level_hash_info;
+};
+static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationMetaInformation>);
+
+struct HierarchicalIntegrityVerificationSizeSet {
+ s64 control_size;
+ s64 master_hash_size;
+ std::array<s64, IntegrityMaxLayerCount - 2> layered_hash_sizes;
+};
+static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationSizeSet>);
+
+class HierarchicalIntegrityVerificationStorage : public IReadOnlyStorage {
+ YUZU_NON_COPYABLE(HierarchicalIntegrityVerificationStorage);
+ YUZU_NON_MOVEABLE(HierarchicalIntegrityVerificationStorage);
+
+public:
+ using GenerateRandomFunction = void (*)(void* dst, size_t size);
+
+ class HierarchicalStorageInformation {
+ public:
+ enum {
+ MasterStorage = 0,
+ Layer1Storage = 1,
+ Layer2Storage = 2,
+ Layer3Storage = 3,
+ Layer4Storage = 4,
+ Layer5Storage = 5,
+ DataStorage = 6,
+ };
+
+ private:
+ std::array<VirtualFile, DataStorage + 1> m_storages;
+
+ public:
+ void SetMasterHashStorage(VirtualFile s) {
+ m_storages[MasterStorage] = s;
+ }
+ void SetLayer1HashStorage(VirtualFile s) {
+ m_storages[Layer1Storage] = s;
+ }
+ void SetLayer2HashStorage(VirtualFile s) {
+ m_storages[Layer2Storage] = s;
+ }
+ void SetLayer3HashStorage(VirtualFile s) {
+ m_storages[Layer3Storage] = s;
+ }
+ void SetLayer4HashStorage(VirtualFile s) {
+ m_storages[Layer4Storage] = s;
+ }
+ void SetLayer5HashStorage(VirtualFile s) {
+ m_storages[Layer5Storage] = s;
+ }
+ void SetDataStorage(VirtualFile s) {
+ m_storages[DataStorage] = s;
+ }
+
+ VirtualFile& operator[](s32 index) {
+ ASSERT(MasterStorage <= index && index <= DataStorage);
+ return m_storages[index];
+ }
+ };
+
+public:
+ HierarchicalIntegrityVerificationStorage();
+ virtual ~HierarchicalIntegrityVerificationStorage() override {
+ this->Finalize();
+ }
+
+ Result Initialize(const HierarchicalIntegrityVerificationInformation& info,
+ HierarchicalStorageInformation storage, int max_data_cache_entries,
+ int max_hash_cache_entries, s8 buffer_level);
+ void Finalize();
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+ virtual size_t GetSize() const override;
+
+ bool IsInitialized() const {
+ return m_data_size >= 0;
+ }
+
+ s64 GetL1HashVerificationBlockSize() const {
+ return m_verify_storages[m_max_layers - 2]->GetBlockSize();
+ }
+
+ VirtualFile GetL1HashStorage() {
+ return std::make_shared<OffsetVfsFile>(
+ m_buffer_storages[m_max_layers - 3],
+ Common::DivideUp(m_data_size, this->GetL1HashVerificationBlockSize()), 0);
+ }
+
+public:
+ static constexpr s8 GetDefaultDataCacheBufferLevel(u32 max_layers) {
+ return static_cast<s8>(16 + max_layers - 2);
+ }
+
+protected:
+ static constexpr s64 HashSize = 256 / 8;
+ static constexpr size_t MaxLayers = IntegrityMaxLayerCount;
+
+private:
+ static GenerateRandomFunction s_generate_random;
+
+ static void SetGenerateRandomFunction(GenerateRandomFunction func) {
+ s_generate_random = func;
+ }
+
+private:
+ friend struct HierarchicalIntegrityVerificationMetaInformation;
+
+private:
+ std::array<std::shared_ptr<IntegrityVerificationStorage>, MaxLayers - 1> m_verify_storages;
+ std::array<VirtualFile, MaxLayers - 1> m_buffer_storages;
+ s64 m_data_size;
+ s32 m_max_layers;
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp
new file mode 100644
index 000000000..caea0b8f8
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp
@@ -0,0 +1,80 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "common/scope_exit.h"
+#include "core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h"
+
+namespace FileSys {
+
+namespace {
+
+s32 Log2(s32 value) {
+ ASSERT(value > 0);
+ ASSERT(Common::IsPowerOfTwo(value));
+
+ s32 log = 0;
+ while ((value >>= 1) > 0) {
+ ++log;
+ }
+ return log;
+}
+
+} // namespace
+
+Result HierarchicalSha256Storage::Initialize(VirtualFile* base_storages, s32 layer_count,
+ size_t htbs, void* hash_buf, size_t hash_buf_size) {
+ // Validate preconditions.
+ ASSERT(layer_count == LayerCount);
+ ASSERT(Common::IsPowerOfTwo(htbs));
+ ASSERT(hash_buf != nullptr);
+
+ // Set size tracking members.
+ m_hash_target_block_size = static_cast<s32>(htbs);
+ m_log_size_ratio = Log2(m_hash_target_block_size / HashSize);
+
+ // Get the base storage size.
+ m_base_storage_size = base_storages[2]->GetSize();
+ {
+ auto size_guard = SCOPE_GUARD({ m_base_storage_size = 0; });
+ R_UNLESS(m_base_storage_size <= static_cast<s64>(HashSize)
+ << m_log_size_ratio << m_log_size_ratio,
+ ResultHierarchicalSha256BaseStorageTooLarge);
+ size_guard.Cancel();
+ }
+
+ // Set hash buffer tracking members.
+ m_base_storage = base_storages[2];
+ m_hash_buffer = static_cast<char*>(hash_buf);
+ m_hash_buffer_size = hash_buf_size;
+
+ // Read the master hash.
+ std::array<u8, HashSize> master_hash{};
+ base_storages[0]->ReadObject(std::addressof(master_hash));
+
+ // Read and validate the data being hashed.
+ s64 hash_storage_size = base_storages[1]->GetSize();
+ ASSERT(Common::IsAligned(hash_storage_size, HashSize));
+ ASSERT(hash_storage_size <= m_hash_target_block_size);
+ ASSERT(hash_storage_size <= static_cast<s64>(m_hash_buffer_size));
+
+ base_storages[1]->Read(reinterpret_cast<u8*>(m_hash_buffer),
+ static_cast<size_t>(hash_storage_size), 0);
+
+ R_SUCCEED();
+}
+
+size_t HierarchicalSha256Storage::Read(u8* buffer, size_t size, size_t offset) const {
+ // Succeed if zero-size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate that we have a buffer to read into.
+ ASSERT(buffer != nullptr);
+
+ // Read the data.
+ return m_base_storage->Read(buffer, size, offset);
+}
+
+} // namespace FileSys
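The size bound enforced in Initialize() reads more easily with concrete numbers. Assuming a hypothetical 4 KiB hash target block (the real value is supplied by the caller):

    // HashSize = 256 / 8 = 32 bytes per SHA-256 hash.
    //   m_log_size_ratio     = Log2(4096 / 32) = Log2(128) = 7
    //   maximum base storage = 32 << 7 << 7              = 512 KiB
    // The single hash layer holds at most one hash target block of hashes
    // (4096 / 32 = 128 hashes), and each hash covers one 4 KiB block of data,
    // so the base storage may be at most 128 * 4 KiB = 512 KiB.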
diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h
new file mode 100644
index 000000000..18df400af
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h
@@ -0,0 +1,44 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <mutex>
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/vfs.h"
+
+namespace FileSys {
+
+class HierarchicalSha256Storage : public IReadOnlyStorage {
+ YUZU_NON_COPYABLE(HierarchicalSha256Storage);
+ YUZU_NON_MOVEABLE(HierarchicalSha256Storage);
+
+public:
+ static constexpr s32 LayerCount = 3;
+ static constexpr size_t HashSize = 256 / 8;
+
+public:
+ HierarchicalSha256Storage() : m_mutex() {}
+
+ Result Initialize(VirtualFile* base_storages, s32 layer_count, size_t htbs, void* hash_buf,
+ size_t hash_buf_size);
+
+ virtual size_t GetSize() const override {
+ return m_base_storage->GetSize();
+ }
+
+ virtual size_t Read(u8* buffer, size_t length, size_t offset) const override;
+
+private:
+ VirtualFile m_base_storage;
+ s64 m_base_storage_size;
+ char* m_hash_buffer;
+ size_t m_hash_buffer_size;
+ s32 m_hash_target_block_size;
+ s32 m_log_size_ratio;
+ std::mutex m_mutex;
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp b/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp
new file mode 100644
index 000000000..7544e70b2
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp
@@ -0,0 +1,119 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
+
+namespace FileSys {
+
+Result IndirectStorage::Initialize(VirtualFile table_storage) {
+ // Read and verify the bucket tree header.
+ BucketTree::Header header;
+ table_storage->ReadObject(std::addressof(header));
+ R_TRY(header.Verify());
+
+ // Determine extents.
+ const auto node_storage_size = QueryNodeStorageSize(header.entry_count);
+ const auto entry_storage_size = QueryEntryStorageSize(header.entry_count);
+ const auto node_storage_offset = QueryHeaderStorageSize();
+ const auto entry_storage_offset = node_storage_offset + node_storage_size;
+
+ // Initialize.
+ R_RETURN(this->Initialize(
+ std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset),
+ std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset),
+ header.entry_count));
+}
+
+void IndirectStorage::Finalize() {
+ if (this->IsInitialized()) {
+ m_table.Finalize();
+ for (auto i = 0; i < StorageCount; i++) {
+ m_data_storage[i] = VirtualFile();
+ }
+ }
+}
+
+Result IndirectStorage::GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count,
+ s64 offset, s64 size) {
+ // Validate pre-conditions.
+ ASSERT(offset >= 0);
+ ASSERT(size >= 0);
+ ASSERT(this->IsInitialized());
+
+ // Clear the out count.
+ R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument);
+ *out_entry_count = 0;
+
+ // Succeed if there's no range.
+ R_SUCCEED_IF(size == 0);
+
+ // If we have an output array, we need it to be non-null.
+ R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument);
+
+ // Check that our range is valid.
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
+
+ // Find the offset in our tree.
+ BucketTree::Visitor visitor;
+ R_TRY(m_table.Find(std::addressof(visitor), offset));
+ {
+ const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
+ ResultInvalidIndirectEntryOffset);
+ }
+
+ // Prepare to loop over entries.
+ const auto end_offset = offset + static_cast<s64>(size);
+ s32 count = 0;
+
+ auto cur_entry = *visitor.Get<Entry>();
+ while (cur_entry.GetVirtualOffset() < end_offset) {
+ // Try to write the entry to the out list.
+ if (entry_count != 0) {
+ if (count >= entry_count) {
+ break;
+ }
+ std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry));
+ }
+
+ count++;
+
+ // Advance.
+ if (visitor.CanMoveNext()) {
+ R_TRY(visitor.MoveNext());
+ cur_entry = *visitor.Get<Entry>();
+ } else {
+ break;
+ }
+ }
+
+ // Write the output count.
+ *out_entry_count = count;
+ R_SUCCEED();
+}
+
+size_t IndirectStorage::Read(u8* buffer, size_t size, size_t offset) const {
+ // Validate pre-conditions.
+ ASSERT(this->IsInitialized());
+ ASSERT(buffer != nullptr);
+
+ // Succeed if there's nothing to read.
+ if (size == 0) {
+ return 0;
+ }
+
+ const_cast<IndirectStorage*>(this)->OperatePerEntry<true, true>(
+ offset, size,
+ [=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
+ storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset),
+ static_cast<size_t>(cur_size), data_offset);
+ R_SUCCEED();
+ });
+
+ return size;
+}
+} // namespace FileSys
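
As a self-contained model of what OperatePerEntry (declared in the header below) hands to the callback used by Read above (illustrative only; this is not the BucketTree implementation): a sorted entry list splits a virtual range into per-entry pieces, each tagged with the storage index and physical offset to read from.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyEntry {
    std::int64_t virt_offset; // where this entry starts in the virtual (patched) view
    std::int64_t phys_offset; // where its data lives in the selected storage
    int storage_index;        // 0 = original data, 1 = patch data
};

struct ToyPiece {
    int storage_index;
    std::int64_t phys_offset;
    std::int64_t size;
};

// Split [offset, offset + size) into pieces, one per covering entry. Entries must be
// sorted by virt_offset and cover the virtual range up to end_of_virtual_data.
std::vector<ToyPiece> SplitRange(const std::vector<ToyEntry>& entries,
                                 std::int64_t end_of_virtual_data, std::int64_t offset,
                                 std::int64_t size) {
    std::vector<ToyPiece> pieces;
    const std::int64_t end = offset + size;
    for (std::size_t i = 0; i < entries.size() && offset < end; ++i) {
        const std::int64_t entry_end =
            i + 1 < entries.size() ? entries[i + 1].virt_offset : end_of_virtual_data;
        if (entry_end <= offset) {
            continue; // the requested range starts in a later entry
        }
        const std::int64_t in_entry = offset - entries[i].virt_offset;
        const std::int64_t len = std::min(end, entry_end) - offset;
        pieces.push_back({entries[i].storage_index, entries[i].phys_offset + in_entry, len});
        offset += len;
    }
    return pieces;
}
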
diff --git a/src/core/file_sys/fssystem/fssystem_indirect_storage.h b/src/core/file_sys/fssystem/fssystem_indirect_storage.h
new file mode 100644
index 000000000..7854335bf
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_indirect_storage.h
@@ -0,0 +1,294 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h"
+#include "core/file_sys/vfs.h"
+#include "core/file_sys/vfs_offset.h"
+
+namespace FileSys {
+
+class IndirectStorage : public IReadOnlyStorage {
+ YUZU_NON_COPYABLE(IndirectStorage);
+ YUZU_NON_MOVEABLE(IndirectStorage);
+
+public:
+ static constexpr s32 StorageCount = 2;
+ static constexpr size_t NodeSize = 16_KiB;
+
+ struct Entry {
+ std::array<u8, sizeof(s64)> virt_offset;
+ std::array<u8, sizeof(s64)> phys_offset;
+ s32 storage_index;
+
+ void SetVirtualOffset(const s64& ofs) {
+ std::memcpy(this->virt_offset.data(), std::addressof(ofs), sizeof(s64));
+ }
+
+ s64 GetVirtualOffset() const {
+ s64 offset;
+ std::memcpy(std::addressof(offset), this->virt_offset.data(), sizeof(s64));
+ return offset;
+ }
+
+ void SetPhysicalOffset(const s64& ofs) {
+ std::memcpy(this->phys_offset.data(), std::addressof(ofs), sizeof(s64));
+ }
+
+ s64 GetPhysicalOffset() const {
+ s64 offset;
+ std::memcpy(std::addressof(offset), this->phys_offset.data(), sizeof(s64));
+ return offset;
+ }
+ };
+ static_assert(std::is_trivial_v<Entry>);
+ static_assert(sizeof(Entry) == 0x14);
+
+ struct EntryData {
+ s64 virt_offset;
+ s64 phys_offset;
+ s32 storage_index;
+
+ void Set(const Entry& entry) {
+ this->virt_offset = entry.GetVirtualOffset();
+ this->phys_offset = entry.GetPhysicalOffset();
+ this->storage_index = entry.storage_index;
+ }
+ };
+ static_assert(std::is_trivial_v<EntryData>);
+
+public:
+ IndirectStorage() : m_table(), m_data_storage() {}
+ virtual ~IndirectStorage() {
+ this->Finalize();
+ }
+
+ Result Initialize(VirtualFile table_storage);
+ void Finalize();
+
+ bool IsInitialized() const {
+ return m_table.IsInitialized();
+ }
+
+ Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, s32 entry_count) {
+ R_RETURN(
+ m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
+ }
+
+ void SetStorage(s32 idx, VirtualFile storage) {
+ ASSERT(0 <= idx && idx < StorageCount);
+ m_data_storage[idx] = storage;
+ }
+
+ template <typename T>
+ void SetStorage(s32 idx, T storage, s64 offset, s64 size) {
+ ASSERT(0 <= idx && idx < StorageCount);
+ m_data_storage[idx] = std::make_shared<OffsetVfsFile>(storage, size, offset);
+ }
+
+ Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset,
+ s64 size);
+
+ virtual size_t GetSize() const override {
+ BucketTree::Offsets offsets{};
+ m_table.GetOffsets(std::addressof(offsets));
+
+ return offsets.end_offset;
+ }
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+
+public:
+ static constexpr s64 QueryHeaderStorageSize() {
+ return BucketTree::QueryHeaderStorageSize();
+ }
+
+ static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
+ return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
+ }
+
+ static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
+ return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
+ }
+
+protected:
+ BucketTree& GetEntryTable() {
+ return m_table;
+ }
+
+ VirtualFile& GetDataStorage(s32 index) {
+ ASSERT(0 <= index && index < StorageCount);
+ return m_data_storage[index];
+ }
+
+ template <bool ContinuousCheck, bool RangeCheck, typename F>
+ Result OperatePerEntry(s64 offset, s64 size, F func);
+
+private:
+ struct ContinuousReadingEntry {
+ static constexpr size_t FragmentSizeMax = 4_KiB;
+
+ IndirectStorage::Entry entry;
+
+ s64 GetVirtualOffset() const {
+ return this->entry.GetVirtualOffset();
+ }
+
+ s64 GetPhysicalOffset() const {
+ return this->entry.GetPhysicalOffset();
+ }
+
+ bool IsFragment() const {
+ return this->entry.storage_index != 0;
+ }
+ };
+ static_assert(std::is_trivial_v<ContinuousReadingEntry>);
+
+private:
+ mutable BucketTree m_table;
+ std::array<VirtualFile, StorageCount> m_data_storage;
+};
+
+template <bool ContinuousCheck, bool RangeCheck, typename F>
+Result IndirectStorage::OperatePerEntry(s64 offset, s64 size, F func) {
+ // Validate preconditions.
+ ASSERT(offset >= 0);
+ ASSERT(size >= 0);
+ ASSERT(this->IsInitialized());
+
+ // Succeed if there's nothing to operate on.
+ R_SUCCEED_IF(size == 0);
+
+ // Get the table offsets.
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ // Validate arguments.
+ R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
+
+ // Find the offset in our tree.
+ BucketTree::Visitor visitor;
+ R_TRY(m_table.Find(std::addressof(visitor), offset));
+ {
+ const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
+ ResultInvalidIndirectEntryOffset);
+ }
+
+ // Prepare to operate in chunks.
+ auto cur_offset = offset;
+ const auto end_offset = offset + static_cast<s64>(size);
+ BucketTree::ContinuousReadingInfo cr_info;
+
+ while (cur_offset < end_offset) {
+ // Get the current entry.
+ const auto cur_entry = *visitor.Get<Entry>();
+
+ // Get and validate the entry's offset.
+ const auto cur_entry_offset = cur_entry.GetVirtualOffset();
+ R_UNLESS(cur_entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);
+
+ // Validate the storage index.
+ R_UNLESS(0 <= cur_entry.storage_index && cur_entry.storage_index < StorageCount,
+ ResultInvalidIndirectEntryStorageIndex);
+
+ // If we need to check the continuous info, do so.
+ if constexpr (ContinuousCheck) {
+ // Scan, if we need to.
+ if (cr_info.CheckNeedScan()) {
+ R_TRY(visitor.ScanContinuousReading<ContinuousReadingEntry>(
+ std::addressof(cr_info), cur_offset,
+ static_cast<size_t>(end_offset - cur_offset)));
+ }
+
+ // Process a base storage entry.
+ if (cr_info.CanDo()) {
+ // Ensure that we can process.
+ R_UNLESS(cur_entry.storage_index == 0, ResultInvalidIndirectEntryStorageIndex);
+
+ // Ensure that we remain within range.
+ const auto data_offset = cur_offset - cur_entry_offset;
+ const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();
+ const auto cur_size = static_cast<s64>(cr_info.GetReadSize());
+
+ // If we should, verify the range.
+ if constexpr (RangeCheck) {
+ // Get the current data storage's size.
+ s64 cur_data_storage_size = m_data_storage[0]->GetSize();
+
+ R_UNLESS(0 <= cur_entry_phys_offset &&
+ cur_entry_phys_offset <= cur_data_storage_size,
+ ResultInvalidIndirectEntryOffset);
+ R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <=
+ cur_data_storage_size,
+ ResultInvalidIndirectStorageSize);
+ }
+
+ // Operate.
+ R_TRY(func(m_data_storage[0], cur_entry_phys_offset + data_offset, cur_offset,
+ cur_size));
+
+ // Mark as done.
+ cr_info.Done();
+ }
+ }
+
+ // Get and validate the next entry offset.
+ s64 next_entry_offset;
+ if (visitor.CanMoveNext()) {
+ R_TRY(visitor.MoveNext());
+ next_entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
+ R_UNLESS(table_offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
+ } else {
+ next_entry_offset = table_offsets.end_offset;
+ }
+ R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);
+
+ // Get the offset of the entry in the data we read.
+ const auto data_offset = cur_offset - cur_entry_offset;
+ const auto data_size = (next_entry_offset - cur_entry_offset);
+ ASSERT(data_size > 0);
+
+ // Determine how much is left.
+ const auto remaining_size = end_offset - cur_offset;
+ const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset);
+ ASSERT(cur_size <= size);
+
+ // Operate, if we need to.
+ bool needs_operate;
+ if constexpr (!ContinuousCheck) {
+ needs_operate = true;
+ } else {
+ needs_operate = !cr_info.IsDone() || cur_entry.storage_index != 0;
+ }
+
+ if (needs_operate) {
+ const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();
+
+ if constexpr (RangeCheck) {
+ // Get the current data storage's size.
+ s64 cur_data_storage_size = m_data_storage[cur_entry.storage_index]->GetSize();
+
+ // Ensure that we remain within range.
+ R_UNLESS(0 <= cur_entry_phys_offset &&
+ cur_entry_phys_offset <= cur_data_storage_size,
+ ResultIndirectStorageCorrupted);
+ R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size,
+ ResultIndirectStorageCorrupted);
+ }
+
+ R_TRY(func(m_data_storage[cur_entry.storage_index], cur_entry_phys_offset + data_offset,
+ cur_offset, cur_size));
+ }
+
+ cur_offset += cur_size;
+ }
+
+ R_SUCCEED();
+}
+
+} // namespace FileSys
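
A hypothetical call site for the class above (the function name and file variables here are placeholders, not part of the diff): initialize from a bucket-tree table storage, attach the original and patch data storages, then read through the combined view.

#include <memory>

#include "core/file_sys/fssystem/fssystem_indirect_storage.h"

namespace FileSys {

Result OpenPatchedView(VirtualFile table_file, VirtualFile base_file, VirtualFile patch_file,
                       std::shared_ptr<IndirectStorage>* out) {
    auto storage = std::make_shared<IndirectStorage>();
    R_UNLESS(storage != nullptr, ResultAllocationMemoryFailedAllocateShared);

    // The table storage holds the bucket tree header followed by its node and entry storages.
    R_TRY(storage->Initialize(std::move(table_file)));

    // Index 0 is the original data, index 1 the patch data (see Entry::storage_index).
    storage->SetStorage(0, std::move(base_file));
    storage->SetStorage(1, std::move(patch_file));

    *out = std::move(storage);
    R_SUCCEED();
}

} // namespace FileSys
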
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp
new file mode 100644
index 000000000..2c3da230c
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp
@@ -0,0 +1,30 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h"
+
+namespace FileSys {
+
+Result IntegrityRomFsStorage::Initialize(
+ HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash,
+ HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info,
+ int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level) {
+ // Set master hash.
+ m_master_hash = master_hash;
+ m_master_hash_storage = std::make_shared<ArrayVfsFile<sizeof(Hash)>>(m_master_hash.value);
+ R_UNLESS(m_master_hash_storage != nullptr,
+ ResultAllocationMemoryFailedInIntegrityRomFsStorageA);
+
+ // Set the master hash storage.
+ storage_info[0] = m_master_hash_storage;
+
+ // Initialize our integrity storage.
+ R_RETURN(m_integrity_storage.Initialize(level_hash_info, storage_info, max_data_cache_entries,
+ max_hash_cache_entries, buffer_level));
+}
+
+void IntegrityRomFsStorage::Finalize() {
+ m_integrity_storage.Finalize();
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h
new file mode 100644
index 000000000..5f8512b2a
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h
@@ -0,0 +1,42 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
+#include "core/file_sys/fssystem/fssystem_nca_header.h"
+#include "core/file_sys/vfs_vector.h"
+
+namespace FileSys {
+
+constexpr inline size_t IntegrityLayerCountRomFs = 7;
+constexpr inline size_t IntegrityHashLayerBlockSize = 16_KiB;
+
+class IntegrityRomFsStorage : public IReadOnlyStorage {
+public:
+ IntegrityRomFsStorage() {}
+ virtual ~IntegrityRomFsStorage() override {
+ this->Finalize();
+ }
+
+ Result Initialize(
+ HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash,
+ HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info,
+ int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level);
+ void Finalize();
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+ return m_integrity_storage.Read(buffer, size, offset);
+ }
+
+ virtual size_t GetSize() const override {
+ return m_integrity_storage.GetSize();
+ }
+
+private:
+ HierarchicalIntegrityVerificationStorage m_integrity_storage;
+ Hash m_master_hash;
+ std::shared_ptr<ArrayVfsFile<sizeof(Hash)>> m_master_hash_storage;
+};
+
+} // namespace FileSys
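
For context on the master-hash handling in IntegrityRomFsStorage::Initialize above, a small hedged sketch: the master hash is wrapped in an in-memory ArrayVfsFile so that level 0 of the integrity hierarchy can read it like any other layer. The helper name below is invented for illustration.

#include <memory>

#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h"

namespace FileSys {

// Mirrors what Initialize does before assigning storage_info[0].
std::shared_ptr<ArrayVfsFile<sizeof(Hash)>> MakeMasterHashStorage(const Hash& master_hash) {
    return std::make_shared<ArrayVfsFile<sizeof(Hash)>>(master_hash.value);
}

} // namespace FileSys
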
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp
new file mode 100644
index 000000000..2f73abf86
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp
@@ -0,0 +1,91 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h"
+
+namespace FileSys {
+
+constexpr inline u32 ILog2(u32 val) {
+ ASSERT(val > 0);
+ return static_cast<u32>((sizeof(u32) * 8) - 1 - std::countl_zero<u32>(val));
+}
+
+void IntegrityVerificationStorage::Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size,
+ s64 upper_layer_verif_block_size, bool is_real_data) {
+ // Validate preconditions.
+ ASSERT(verif_block_size >= HashSize);
+
+ // Set storages.
+ m_hash_storage = hs;
+ m_data_storage = ds;
+
+ // Set verification block sizes.
+ m_verification_block_size = verif_block_size;
+ m_verification_block_order = ILog2(static_cast<u32>(verif_block_size));
+ ASSERT(m_verification_block_size == 1ll << m_verification_block_order);
+
+ // Set upper layer block sizes.
+ upper_layer_verif_block_size = std::max(upper_layer_verif_block_size, HashSize);
+ m_upper_layer_verification_block_size = upper_layer_verif_block_size;
+ m_upper_layer_verification_block_order = ILog2(static_cast<u32>(upper_layer_verif_block_size));
+ ASSERT(m_upper_layer_verification_block_size == 1ll << m_upper_layer_verification_block_order);
+
+ // Validate sizes.
+ {
+ s64 hash_size = m_hash_storage->GetSize();
+ s64 data_size = m_data_storage->GetSize();
+ ASSERT(((hash_size / HashSize) * m_verification_block_size) >= data_size);
+ }
+
+ // Set data.
+ m_is_real_data = is_real_data;
+}
+
+void IntegrityVerificationStorage::Finalize() {
+ m_hash_storage = VirtualFile();
+ m_data_storage = VirtualFile();
+}
+
+size_t IntegrityVerificationStorage::Read(u8* buffer, size_t size, size_t offset) const {
+ // Succeed if zero size.
+ if (size == 0) {
+ return size;
+ }
+
+ // Validate arguments.
+ ASSERT(buffer != nullptr);
+
+ // Validate the offset.
+ s64 data_size = m_data_storage->GetSize();
+ ASSERT(offset <= static_cast<size_t>(data_size));
+
+ // Validate the access range.
+ ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(
+ offset, size, Common::AlignUp(data_size, static_cast<size_t>(m_verification_block_size)))));
+
+ // Determine the read extents.
+ size_t read_size = size;
+ if (static_cast<s64>(offset + read_size) > data_size) {
+ // Determine the padding sizes.
+ s64 padding_offset = data_size - offset;
+ size_t padding_size = static_cast<size_t>(
+ m_verification_block_size - (padding_offset & (m_verification_block_size - 1)));
+ ASSERT(static_cast<s64>(padding_size) < m_verification_block_size);
+
+ // Clear the padding.
+ std::memset(static_cast<u8*>(buffer) + padding_offset, 0, padding_size);
+
+ // Set the new in-bounds size.
+ read_size = static_cast<size_t>(data_size - offset);
+ }
+
+ // Perform the read.
+ return m_data_storage->Read(buffer, read_size, offset);
+}
+
+size_t IntegrityVerificationStorage::GetSize() const {
+ return m_data_storage->GetSize();
+}
+
+} // namespace FileSys
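
A worked check of the tail-padding arithmetic in Read above (standalone and illustrative only): when a read extends past the end of the verified data, the remainder of the final verification block in the output buffer is zero-filled.

#include <cstdint>

struct TailPadding {
    std::int64_t offset_in_buffer; // first padded byte, relative to the read offset
    std::int64_t size;             // number of bytes cleared to zero
};

constexpr TailPadding ComputeTailPadding(std::int64_t data_size, std::int64_t read_offset,
                                         std::int64_t block_size) {
    const std::int64_t in_bounds = data_size - read_offset;
    return {in_bounds, block_size - (in_bounds & (block_size - 1))};
}

// Reading at offset 0x8000 of a 0x9000-byte layer with 0x4000-byte blocks zero-fills
// 0x3000 bytes starting 0x1000 bytes into the buffer.
static_assert(ComputeTailPadding(0x9000, 0x8000, 0x4000).offset_in_buffer == 0x1000);
static_assert(ComputeTailPadding(0x9000, 0x8000, 0x4000).size == 0x3000);
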
diff --git a/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h
new file mode 100644
index 000000000..09f76799d
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h
@@ -0,0 +1,65 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <optional>
+
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/fssystem/fs_types.h"
+
+namespace FileSys {
+
+class IntegrityVerificationStorage : public IReadOnlyStorage {
+ YUZU_NON_COPYABLE(IntegrityVerificationStorage);
+ YUZU_NON_MOVEABLE(IntegrityVerificationStorage);
+
+public:
+ static constexpr s64 HashSize = 256 / 8;
+
+ struct BlockHash {
+ std::array<u8, HashSize> hash;
+ };
+ static_assert(std::is_trivial_v<BlockHash>);
+
+public:
+ IntegrityVerificationStorage()
+ : m_verification_block_size(0), m_verification_block_order(0),
+ m_upper_layer_verification_block_size(0), m_upper_layer_verification_block_order(0) {}
+ virtual ~IntegrityVerificationStorage() override {
+ this->Finalize();
+ }
+
+ void Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size,
+ s64 upper_layer_verif_block_size, bool is_real_data);
+ void Finalize();
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+ virtual size_t GetSize() const override;
+
+ s64 GetBlockSize() const {
+ return m_verification_block_size;
+ }
+
+private:
+ static void SetValidationBit(BlockHash* hash) {
+ ASSERT(hash != nullptr);
+ hash->hash[HashSize - 1] |= 0x80;
+ }
+
+ static bool IsValidationBit(const BlockHash* hash) {
+ ASSERT(hash != nullptr);
+ return (hash->hash[HashSize - 1] & 0x80) != 0;
+ }
+
+private:
+ VirtualFile m_hash_storage;
+ VirtualFile m_data_storage;
+ s64 m_verification_block_size;
+ s64 m_verification_block_order;
+ s64 m_upper_layer_verification_block_size;
+ s64 m_upper_layer_verification_block_order;
+ bool m_is_real_data;
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h b/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h
new file mode 100644
index 000000000..c07a127fb
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h
@@ -0,0 +1,61 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fs_i_storage.h"
+
+namespace FileSys {
+
+class MemoryResourceBufferHoldStorage : public IStorage {
+ YUZU_NON_COPYABLE(MemoryResourceBufferHoldStorage);
+ YUZU_NON_MOVEABLE(MemoryResourceBufferHoldStorage);
+
+public:
+ MemoryResourceBufferHoldStorage(VirtualFile storage, size_t buffer_size)
+ : m_storage(std::move(storage)), m_buffer(::operator new(buffer_size)),
+ m_buffer_size(buffer_size) {}
+
+ virtual ~MemoryResourceBufferHoldStorage() {
+ // If we have a buffer, deallocate it.
+ if (m_buffer != nullptr) {
+ ::operator delete(m_buffer);
+ }
+ }
+
+ bool IsValid() const {
+ return m_buffer != nullptr;
+ }
+ void* GetBuffer() const {
+ return m_buffer;
+ }
+
+public:
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+ // Check pre-conditions.
+ ASSERT(m_storage != nullptr);
+
+ return m_storage->Read(buffer, size, offset);
+ }
+
+ virtual size_t GetSize() const override {
+ // Check pre-conditions.
+ ASSERT(m_storage != nullptr);
+
+ return m_storage->GetSize();
+ }
+
+ virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
+ // Check pre-conditions.
+ ASSERT(m_storage != nullptr);
+
+ return m_storage->Write(buffer, size, offset);
+ }
+
+private:
+ VirtualFile m_storage;
+ void* m_buffer;
+ size_t m_buffer_size;
+};
+
+} // namespace FileSys
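
The class above keeps a raw ::operator new allocation alive alongside the storage it wraps. Purely to illustrate that ownership model (not part of the diff, and not a proposed change to it), the same hold expressed with a smart pointer looks like this; note std::make_unique throws on failure rather than returning null, so IsValid() is kept only for parity.

#include <cstddef>
#include <cstdint>
#include <memory>

class ToyBufferHold {
public:
    explicit ToyBufferHold(std::size_t size)
        : m_buffer(std::make_unique<std::uint8_t[]>(size)), m_size(size) {}

    bool IsValid() const { return m_buffer != nullptr; }
    void* GetBuffer() const { return m_buffer.get(); }
    std::size_t GetBufferSize() const { return m_size; }

private:
    std::unique_ptr<std::uint8_t[]> m_buffer; // freed automatically on destruction
    std::size_t m_size;
};
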
diff --git a/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp
new file mode 100644
index 000000000..0f5432203
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp
@@ -0,0 +1,1351 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h"
+#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
+#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
+#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h"
+#include "core/file_sys/fssystem/fssystem_compressed_storage.h"
+#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
+#include "core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h"
+#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
+#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h"
+#include "core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h"
+#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
+#include "core/file_sys/fssystem/fssystem_sparse_storage.h"
+#include "core/file_sys/fssystem/fssystem_switch_storage.h"
+#include "core/file_sys/vfs_offset.h"
+#include "core/file_sys/vfs_vector.h"
+
+namespace FileSys {
+
+namespace {
+
+constexpr inline s32 IntegrityDataCacheCount = 24;
+constexpr inline s32 IntegrityHashCacheCount = 8;
+
+constexpr inline s32 IntegrityDataCacheCountForMeta = 16;
+constexpr inline s32 IntegrityHashCacheCountForMeta = 2;
+
+class SharedNcaBodyStorage : public IReadOnlyStorage {
+ YUZU_NON_COPYABLE(SharedNcaBodyStorage);
+ YUZU_NON_MOVEABLE(SharedNcaBodyStorage);
+
+private:
+ VirtualFile m_storage;
+ std::shared_ptr<NcaReader> m_nca_reader;
+
+public:
+ SharedNcaBodyStorage(VirtualFile s, std::shared_ptr<NcaReader> r)
+ : m_storage(std::move(s)), m_nca_reader(std::move(r)) {}
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+ // Validate pre-conditions.
+ ASSERT(m_storage != nullptr);
+
+ // Read from the base storage.
+ return m_storage->Read(buffer, size, offset);
+ }
+
+ virtual size_t GetSize() const override {
+ // Validate pre-conditions.
+ ASSERT(m_storage != nullptr);
+
+ return m_storage->GetSize();
+ }
+};
+
+inline s64 GetFsOffset(const NcaReader& reader, s32 fs_index) {
+ return static_cast<s64>(reader.GetFsOffset(fs_index));
+}
+
+inline s64 GetFsEndOffset(const NcaReader& reader, s32 fs_index) {
+ return static_cast<s64>(reader.GetFsEndOffset(fs_index));
+}
+
+using Sha256DataRegion = NcaFsHeader::Region;
+using IntegrityLevelInfo = NcaFsHeader::HashData::IntegrityMetaInfo::LevelHashInfo;
+using IntegrityDataInfo = IntegrityLevelInfo::HierarchicalIntegrityVerificationLevelInformation;
+
+} // namespace
+
+Result NcaFileSystemDriver::OpenStorageWithContext(VirtualFile* out,
+ NcaFsHeaderReader* out_header_reader,
+ s32 fs_index, StorageContext* ctx) {
+ // Open storage.
+ R_RETURN(this->OpenStorageImpl(out, out_header_reader, fs_index, ctx));
+}
+
+Result NcaFileSystemDriver::OpenStorageImpl(VirtualFile* out, NcaFsHeaderReader* out_header_reader,
+ s32 fs_index, StorageContext* ctx) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(out_header_reader != nullptr);
+ ASSERT(0 <= fs_index && fs_index < NcaHeader::FsCountMax);
+
+ // Validate the fs index.
+ R_UNLESS(m_reader->HasFsInfo(fs_index), ResultPartitionNotFound);
+
+ // Initialize our header reader for the fs index.
+ R_TRY(out_header_reader->Initialize(*m_reader, fs_index));
+
+ // Declare the storage we're opening.
+ VirtualFile storage;
+
+ // Process sparse layer.
+ s64 fs_data_offset = 0;
+ if (out_header_reader->ExistsSparseLayer()) {
+ // Get the sparse info.
+ const auto& sparse_info = out_header_reader->GetSparseInfo();
+
+ // Create based on whether we have a meta hash layer.
+ if (out_header_reader->ExistsSparseMetaHashLayer()) {
+ // Create the sparse storage with verification.
+ R_TRY(this->CreateSparseStorageWithVerification(
+ std::addressof(storage), std::addressof(fs_data_offset),
+ ctx != nullptr ? std::addressof(ctx->current_sparse_storage) : nullptr,
+ ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
+ ctx != nullptr ? std::addressof(ctx->sparse_layer_info_storage) : nullptr, fs_index,
+ out_header_reader->GetAesCtrUpperIv(), sparse_info,
+ out_header_reader->GetSparseMetaDataHashDataInfo(),
+ out_header_reader->GetSparseMetaHashType()));
+ } else {
+ // Create the sparse storage.
+ R_TRY(this->CreateSparseStorage(
+ std::addressof(storage), std::addressof(fs_data_offset),
+ ctx != nullptr ? std::addressof(ctx->current_sparse_storage) : nullptr,
+ ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
+ fs_index, out_header_reader->GetAesCtrUpperIv(), sparse_info));
+ }
+ } else {
+ // Get the data offsets.
+ fs_data_offset = GetFsOffset(*m_reader, fs_index);
+ const auto fs_end_offset = GetFsEndOffset(*m_reader, fs_index);
+
+ // Validate that we're within range.
+ const auto data_size = fs_end_offset - fs_data_offset;
+ R_UNLESS(data_size > 0, ResultInvalidNcaHeader);
+
+ // Create the body substorage.
+ R_TRY(this->CreateBodySubStorage(std::addressof(storage), fs_data_offset, data_size));
+
+ // Potentially save the body substorage to our context.
+ if (ctx != nullptr) {
+ ctx->body_substorage = storage;
+ }
+ }
+
+ // Process patch layer.
+ const auto& patch_info = out_header_reader->GetPatchInfo();
+ VirtualFile patch_meta_aes_ctr_ex_meta_storage;
+ VirtualFile patch_meta_indirect_meta_storage;
+ if (out_header_reader->ExistsPatchMetaHashLayer()) {
+ // Check the meta hash type.
+ R_UNLESS(out_header_reader->GetPatchMetaHashType() ==
+ NcaFsHeader::MetaDataHashType::HierarchicalIntegrity,
+ ResultRomNcaInvalidPatchMetaDataHashType);
+
+ // Create the patch meta storage.
+ R_TRY(this->CreatePatchMetaStorage(
+ std::addressof(patch_meta_aes_ctr_ex_meta_storage),
+ std::addressof(patch_meta_indirect_meta_storage),
+ ctx != nullptr ? std::addressof(ctx->patch_layer_info_storage) : nullptr, storage,
+ fs_data_offset, out_header_reader->GetAesCtrUpperIv(), patch_info,
+ out_header_reader->GetPatchMetaDataHashDataInfo()));
+ }
+
+ if (patch_info.HasAesCtrExTable()) {
+ // Check the encryption type.
+ ASSERT(out_header_reader->GetEncryptionType() == NcaFsHeader::EncryptionType::None ||
+ out_header_reader->GetEncryptionType() == NcaFsHeader::EncryptionType::AesCtrEx ||
+ out_header_reader->GetEncryptionType() ==
+ NcaFsHeader::EncryptionType::AesCtrExSkipLayerHash);
+
+ // Create the ex meta storage.
+ VirtualFile aes_ctr_ex_storage_meta_storage = patch_meta_aes_ctr_ex_meta_storage;
+ if (aes_ctr_ex_storage_meta_storage == nullptr) {
+ // If we don't have a meta storage, we must not have a patch meta hash layer.
+ ASSERT(!out_header_reader->ExistsPatchMetaHashLayer());
+
+ R_TRY(this->CreateAesCtrExStorageMetaStorage(
+ std::addressof(aes_ctr_ex_storage_meta_storage), storage, fs_data_offset,
+ out_header_reader->GetEncryptionType(), out_header_reader->GetAesCtrUpperIv(),
+ patch_info));
+ }
+
+ // Create the ex storage.
+ VirtualFile aes_ctr_ex_storage;
+ R_TRY(this->CreateAesCtrExStorage(
+ std::addressof(aes_ctr_ex_storage),
+ ctx != nullptr ? std::addressof(ctx->aes_ctr_ex_storage) : nullptr, std::move(storage),
+ aes_ctr_ex_storage_meta_storage, fs_data_offset, out_header_reader->GetAesCtrUpperIv(),
+ patch_info));
+
+ // Set the base storage as the ex storage.
+ storage = std::move(aes_ctr_ex_storage);
+
+ // Potentially save storages to our context.
+ if (ctx != nullptr) {
+ ctx->aes_ctr_ex_storage_meta_storage = aes_ctr_ex_storage_meta_storage;
+ ctx->aes_ctr_ex_storage_data_storage = storage;
+ ctx->fs_data_storage = storage;
+ }
+ } else {
+ // Create the appropriate storage for the encryption type.
+ switch (out_header_reader->GetEncryptionType()) {
+ case NcaFsHeader::EncryptionType::None:
+ // If there's no encryption, use the base storage we made previously.
+ break;
+ case NcaFsHeader::EncryptionType::AesXts:
+ R_TRY(this->CreateAesXtsStorage(std::addressof(storage), std::move(storage),
+ fs_data_offset));
+ break;
+ case NcaFsHeader::EncryptionType::AesCtr:
+ R_TRY(this->CreateAesCtrStorage(std::addressof(storage), std::move(storage),
+ fs_data_offset, out_header_reader->GetAesCtrUpperIv(),
+ AlignmentStorageRequirement::None));
+ break;
+ case NcaFsHeader::EncryptionType::AesCtrSkipLayerHash: {
+ // Create the aes ctr storage.
+ VirtualFile aes_ctr_storage;
+ R_TRY(this->CreateAesCtrStorage(std::addressof(aes_ctr_storage), storage,
+ fs_data_offset, out_header_reader->GetAesCtrUpperIv(),
+ AlignmentStorageRequirement::None));
+
+ // Create region switch storage.
+ R_TRY(this->CreateRegionSwitchStorage(std::addressof(storage), out_header_reader,
+ std::move(storage), std::move(aes_ctr_storage)));
+ } break;
+ default:
+ R_THROW(ResultInvalidNcaFsHeaderEncryptionType);
+ }
+
+ // Potentially save storages to our context.
+ if (ctx != nullptr) {
+ ctx->fs_data_storage = storage;
+ }
+ }
+
+ // Process indirect layer.
+ if (patch_info.HasIndirectTable()) {
+ // Create the indirect meta storage.
+ VirtualFile indirect_storage_meta_storage = patch_meta_indirect_meta_storage;
+ if (indirect_storage_meta_storage == nullptr) {
+ // If we don't have a meta storage, we must not have a patch meta hash layer.
+ ASSERT(!out_header_reader->ExistsPatchMetaHashLayer());
+
+ R_TRY(this->CreateIndirectStorageMetaStorage(
+ std::addressof(indirect_storage_meta_storage), storage, patch_info));
+ }
+
+ // Potentially save the indirect meta storage to our context.
+ if (ctx != nullptr) {
+ ctx->indirect_storage_meta_storage = indirect_storage_meta_storage;
+ }
+
+ // Get the original indirectable storage.
+ VirtualFile original_indirectable_storage;
+ if (m_original_reader != nullptr && m_original_reader->HasFsInfo(fs_index)) {
+ // Create a driver for the original.
+ NcaFileSystemDriver original_driver(m_original_reader);
+
+ // Create a header reader for the original.
+ NcaFsHeaderReader original_header_reader;
+ R_TRY(original_header_reader.Initialize(*m_original_reader, fs_index));
+
+ // Open original indirectable storage.
+ R_TRY(original_driver.OpenIndirectableStorageAsOriginal(
+ std::addressof(original_indirectable_storage),
+ std::addressof(original_header_reader), ctx));
+ } else if (ctx != nullptr && ctx->external_original_storage != nullptr) {
+ // Use the external original storage.
+ original_indirectable_storage = ctx->external_original_storage;
+ } else {
+ // Allocate a dummy memory storage as original storage.
+ original_indirectable_storage = std::make_shared<VectorVfsFile>();
+ R_UNLESS(original_indirectable_storage != nullptr,
+ ResultAllocationMemoryFailedAllocateShared);
+ }
+
+ // Create the indirect storage.
+ VirtualFile indirect_storage;
+ R_TRY(this->CreateIndirectStorage(
+ std::addressof(indirect_storage),
+ ctx != nullptr ? std::addressof(ctx->indirect_storage) : nullptr, std::move(storage),
+ std::move(original_indirectable_storage), std::move(indirect_storage_meta_storage),
+ patch_info));
+
+ // Set storage as the indirect storage.
+ storage = std::move(indirect_storage);
+ }
+
+ // Check if we're sparse or requested to skip the integrity layer.
+ if (out_header_reader->ExistsSparseLayer() || (ctx != nullptr && ctx->open_raw_storage)) {
+ *out = std::move(storage);
+ R_SUCCEED();
+ }
+
+ // Create the non-raw storage.
+ R_RETURN(this->CreateStorageByRawStorage(out, out_header_reader, std::move(storage), ctx));
+}
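
A hypothetical call site for the two functions above (the exact shape of StorageContext and the driver's constructor live in fssystem_nca_file_system_driver.h elsewhere in this commit, so treat this as a sketch rather than a verbatim API reference): open the fully layered view of one fs section of an NCA.

#include <memory>

#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"

namespace FileSys {

Result OpenSection(std::shared_ptr<NcaReader> reader, s32 fs_index, VirtualFile* out) {
    NcaFileSystemDriver driver(reader);

    NcaFsHeaderReader header_reader;
    NcaFileSystemDriver::StorageContext ctx{};
    ctx.open_raw_storage = false; // also apply the hash/integrity and compression layers

    R_RETURN(driver.OpenStorageWithContext(out, std::addressof(header_reader), fs_index,
                                           std::addressof(ctx)));
}

} // namespace FileSys
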
+
+Result NcaFileSystemDriver::CreateStorageByRawStorage(VirtualFile* out,
+ const NcaFsHeaderReader* header_reader,
+ VirtualFile raw_storage,
+ StorageContext* ctx) {
+ // Initialize storage as raw storage.
+ VirtualFile storage = std::move(raw_storage);
+
+ // Process hash/integrity layer.
+ switch (header_reader->GetHashType()) {
+ case NcaFsHeader::HashType::HierarchicalSha256Hash:
+ R_TRY(this->CreateSha256Storage(std::addressof(storage), std::move(storage),
+ header_reader->GetHashData().hierarchical_sha256_data));
+ break;
+ case NcaFsHeader::HashType::HierarchicalIntegrityHash:
+ R_TRY(this->CreateIntegrityVerificationStorage(
+ std::addressof(storage), std::move(storage),
+ header_reader->GetHashData().integrity_meta_info));
+ break;
+ default:
+ R_THROW(ResultInvalidNcaFsHeaderHashType);
+ }
+
+ // Process compression layer.
+ if (header_reader->ExistsCompressionLayer()) {
+ R_TRY(this->CreateCompressedStorage(
+ std::addressof(storage),
+ ctx != nullptr ? std::addressof(ctx->compressed_storage) : nullptr,
+ ctx != nullptr ? std::addressof(ctx->compressed_storage_meta_storage) : nullptr,
+ std::move(storage), header_reader->GetCompressionInfo()));
+ }
+
+ // Set output storage.
+ *out = std::move(storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::OpenIndirectableStorageAsOriginal(
+ VirtualFile* out, const NcaFsHeaderReader* header_reader, StorageContext* ctx) {
+ // Get the fs index.
+ const auto fs_index = header_reader->GetFsIndex();
+
+ // Declare the storage we're opening.
+ VirtualFile storage;
+
+ // Process sparse layer.
+ s64 fs_data_offset = 0;
+ if (header_reader->ExistsSparseLayer()) {
+ // Get the sparse info.
+ const auto& sparse_info = header_reader->GetSparseInfo();
+
+ // Create based on whether we have a meta hash layer.
+ if (header_reader->ExistsSparseMetaHashLayer()) {
+ // Create the sparse storage with verification.
+ R_TRY(this->CreateSparseStorageWithVerification(
+ std::addressof(storage), std::addressof(fs_data_offset),
+ ctx != nullptr ? std::addressof(ctx->original_sparse_storage) : nullptr,
+ ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
+ ctx != nullptr ? std::addressof(ctx->sparse_layer_info_storage) : nullptr, fs_index,
+ header_reader->GetAesCtrUpperIv(), sparse_info,
+ header_reader->GetSparseMetaDataHashDataInfo(),
+ header_reader->GetSparseMetaHashType()));
+ } else {
+ // Create the sparse storage.
+ R_TRY(this->CreateSparseStorage(
+ std::addressof(storage), std::addressof(fs_data_offset),
+ ctx != nullptr ? std::addressof(ctx->original_sparse_storage) : nullptr,
+ ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr,
+ fs_index, header_reader->GetAesCtrUpperIv(), sparse_info));
+ }
+ } else {
+ // Get the data offsets.
+ fs_data_offset = GetFsOffset(*m_reader, fs_index);
+ const auto fs_end_offset = GetFsEndOffset(*m_reader, fs_index);
+
+ // Validate that we're within range.
+ const auto data_size = fs_end_offset - fs_data_offset;
+ R_UNLESS(data_size > 0, ResultInvalidNcaHeader);
+
+ // Create the body substorage.
+ R_TRY(this->CreateBodySubStorage(std::addressof(storage), fs_data_offset, data_size));
+ }
+
+ // Create the appropriate storage for the encryption type.
+ switch (header_reader->GetEncryptionType()) {
+ case NcaFsHeader::EncryptionType::None:
+ // If there's no encryption, use the base storage we made previously.
+ break;
+ case NcaFsHeader::EncryptionType::AesXts:
+ R_TRY(
+ this->CreateAesXtsStorage(std::addressof(storage), std::move(storage), fs_data_offset));
+ break;
+ case NcaFsHeader::EncryptionType::AesCtr:
+ R_TRY(this->CreateAesCtrStorage(std::addressof(storage), std::move(storage), fs_data_offset,
+ header_reader->GetAesCtrUpperIv(),
+ AlignmentStorageRequirement::CacheBlockSize));
+ break;
+ default:
+ R_THROW(ResultInvalidNcaFsHeaderEncryptionType);
+ }
+
+ // Set output storage.
+ *out = std::move(storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateBodySubStorage(VirtualFile* out, s64 offset, s64 size) {
+ // Create the body storage.
+ auto body_storage =
+ std::make_shared<SharedNcaBodyStorage>(m_reader->GetSharedBodyStorage(), m_reader);
+ R_UNLESS(body_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Get the body storage size.
+ s64 body_size = body_storage->GetSize();
+
+ // Check that we're within range.
+ R_UNLESS(offset + size <= body_size, ResultNcaBaseStorageOutOfRangeB);
+
+ // Create substorage.
+ auto body_substorage = std::make_shared<OffsetVfsFile>(std::move(body_storage), size, offset);
+ R_UNLESS(body_substorage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the output storage.
+ *out = std::move(body_substorage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateAesCtrStorage(
+ VirtualFile* out, VirtualFile base_storage, s64 offset, const NcaAesCtrUpperIv& upper_iv,
+ AlignmentStorageRequirement alignment_storage_requirement) {
+ // Check pre-conditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+
+ // Create the iv.
+ std::array<u8, AesCtrStorage::IvSize> iv{};
+ AesCtrStorage::MakeIv(iv.data(), sizeof(iv), upper_iv.value, offset);
+
+ // Create the ctr storage.
+ VirtualFile aes_ctr_storage;
+ if (m_reader->HasExternalDecryptionKey()) {
+ aes_ctr_storage = std::make_shared<AesCtrStorage>(
+ std::move(base_storage), m_reader->GetExternalDecryptionKey(), AesCtrStorage::KeySize,
+ iv.data(), AesCtrStorage::IvSize);
+ R_UNLESS(aes_ctr_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+ } else {
+ // Create software decryption storage.
+ auto sw_storage = std::make_shared<AesCtrStorage>(
+ base_storage, m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtr),
+ AesCtrStorage::KeySize, iv.data(), AesCtrStorage::IvSize);
+ R_UNLESS(sw_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ aes_ctr_storage = std::move(sw_storage);
+ }
+
+ // Create alignment matching storage.
+ auto aligned_storage = std::make_shared<AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>>(
+ std::move(aes_ctr_storage));
+ R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the out storage.
+ *out = std::move(aligned_storage);
+ R_SUCCEED();
+}
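
The AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1> wrapper used above widens arbitrary reads to whole 16-byte AES-CTR blocks before they reach the cipher storage (its implementation lives in fssystem_alignment_matching_storage.h). As a standalone illustration, the block rounding involved is:

#include <cstdint>

constexpr std::int64_t CtrBlockSize = 0x10; // NcaHeader::CtrBlockSize

constexpr std::int64_t AlignDownToBlock(std::int64_t v) {
    return v & ~(CtrBlockSize - 1);
}

constexpr std::int64_t AlignUpToBlock(std::int64_t v) {
    return AlignDownToBlock(v + CtrBlockSize - 1);
}

// A 5-byte read at offset 0x23 turns into a core read of the aligned range [0x20, 0x30).
static_assert(AlignDownToBlock(0x23) == 0x20);
static_assert(AlignUpToBlock(0x23 + 5) == 0x30);
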
+
+Result NcaFileSystemDriver::CreateAesXtsStorage(VirtualFile* out, VirtualFile base_storage,
+ s64 offset) {
+ // Check pre-conditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+
+ // Create the iv.
+ std::array<u8, AesXtsStorage::IvSize> iv{};
+ AesXtsStorage::MakeAesXtsIv(iv.data(), sizeof(iv), offset, NcaHeader::XtsBlockSize);
+
+ // Make the aes xts storage.
+ const auto* const key1 = m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesXts1);
+ const auto* const key2 = m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesXts2);
+ auto xts_storage =
+ std::make_shared<AesXtsStorage>(std::move(base_storage), key1, key2, AesXtsStorage::KeySize,
+ iv.data(), AesXtsStorage::IvSize, NcaHeader::XtsBlockSize);
+ R_UNLESS(xts_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create alignment matching storage.
+ auto aligned_storage = std::make_shared<AlignmentMatchingStorage<NcaHeader::XtsBlockSize, 1>>(
+ std::move(xts_storage));
+ R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the out storage.
+ *out = std::move(aligned_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateSparseStorageMetaStorage(VirtualFile* out,
+ VirtualFile base_storage, s64 offset,
+ const NcaAesCtrUpperIv& upper_iv,
+ const NcaSparseInfo& sparse_info) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+
+ // Get the base storage size.
+ s64 base_size = base_storage->GetSize();
+
+ // Get the meta extents.
+ const auto meta_offset = sparse_info.bucket.offset;
+ const auto meta_size = sparse_info.bucket.size;
+ R_UNLESS(meta_offset + meta_size - offset <= base_size, ResultNcaBaseStorageOutOfRangeB);
+
+ // Create the encrypted storage.
+ auto enc_storage =
+ std::make_shared<OffsetVfsFile>(std::move(base_storage), meta_size, meta_offset);
+ R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create the decrypted storage.
+ VirtualFile decrypted_storage;
+ R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
+ offset + meta_offset, sparse_info.MakeAesCtrUpperIv(upper_iv),
+ AlignmentStorageRequirement::None));
+
+ // Create buffered storage.
+ std::vector<u8> meta_data(meta_size);
+ decrypted_storage->Read(meta_data.data(), meta_size, 0);
+
+ auto buffered_storage = std::make_shared<VectorVfsFile>(std::move(meta_data));
+ R_UNLESS(buffered_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the output.
+ *out = std::move(buffered_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateSparseStorageCore(std::shared_ptr<SparseStorage>* out,
+ VirtualFile base_storage, s64 base_size,
+ VirtualFile meta_storage,
+ const NcaSparseInfo& sparse_info,
+ bool external_info) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+ ASSERT(meta_storage != nullptr);
+
+ // Read and verify the bucket tree header.
+ BucketTree::Header header;
+ std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header));
+ R_TRY(header.Verify());
+
+ // Determine storage extents.
+ const auto node_offset = 0;
+ const auto node_size = SparseStorage::QueryNodeStorageSize(header.entry_count);
+ const auto entry_offset = node_offset + node_size;
+ const auto entry_size = SparseStorage::QueryEntryStorageSize(header.entry_count);
+
+ // Create the sparse storage.
+ auto sparse_storage = std::make_shared<SparseStorage>();
+ R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Sanity check that we can be doing this.
+ ASSERT(header.entry_count != 0);
+
+ // Initialize the sparse storage.
+ R_TRY(sparse_storage->Initialize(
+ std::make_shared<OffsetVfsFile>(meta_storage, node_size, node_offset),
+ std::make_shared<OffsetVfsFile>(meta_storage, entry_size, entry_offset),
+ header.entry_count));
+
+ // If not external, set the data storage.
+ if (!external_info) {
+ sparse_storage->SetDataStorage(
+ std::make_shared<OffsetVfsFile>(std::move(base_storage), base_size, 0));
+ }
+
+ // Set the output.
+ *out = std::move(sparse_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateSparseStorage(VirtualFile* out, s64* out_fs_data_offset,
+ std::shared_ptr<SparseStorage>* out_sparse_storage,
+ VirtualFile* out_meta_storage, s32 index,
+ const NcaAesCtrUpperIv& upper_iv,
+ const NcaSparseInfo& sparse_info) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(out_fs_data_offset != nullptr);
+
+ // Check the sparse info generation.
+ R_UNLESS(sparse_info.generation != 0, ResultInvalidNcaHeader);
+
+ // Read and verify the bucket tree header.
+ BucketTree::Header header;
+ std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header));
+ R_TRY(header.Verify());
+
+ // Determine the storage extents.
+ const auto fs_offset = GetFsOffset(*m_reader, index);
+ const auto fs_end_offset = GetFsEndOffset(*m_reader, index);
+ const auto fs_size = fs_end_offset - fs_offset;
+
+ // Create the sparse storage.
+ std::shared_ptr<SparseStorage> sparse_storage;
+ if (header.entry_count != 0) {
+ // Create the body substorage.
+ VirtualFile body_substorage;
+ R_TRY(this->CreateBodySubStorage(std::addressof(body_substorage),
+ sparse_info.physical_offset,
+ sparse_info.GetPhysicalSize()));
+
+ // Create the meta storage.
+ VirtualFile meta_storage;
+ R_TRY(this->CreateSparseStorageMetaStorage(std::addressof(meta_storage), body_substorage,
+ sparse_info.physical_offset, upper_iv,
+ sparse_info));
+
+ // Potentially set the output meta storage.
+ if (out_meta_storage != nullptr) {
+ *out_meta_storage = meta_storage;
+ }
+
+ // Create the sparse storage.
+ R_TRY(this->CreateSparseStorageCore(std::addressof(sparse_storage), body_substorage,
+ sparse_info.GetPhysicalSize(), std::move(meta_storage),
+ sparse_info, false));
+ } else {
+ // If there are no entries, there's nothing to actually do.
+ sparse_storage = std::make_shared<SparseStorage>();
+ R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ sparse_storage->Initialize(fs_size);
+ }
+
+ // Potentially set the output sparse storage.
+ if (out_sparse_storage != nullptr) {
+ *out_sparse_storage = sparse_storage;
+ }
+
+ // Set the output fs data offset.
+ *out_fs_data_offset = fs_offset;
+
+ // Set the output storage.
+ *out = std::move(sparse_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateSparseStorageMetaStorageWithVerification(
+ VirtualFile* out, VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset,
+ const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
+ const NcaMetaDataHashDataInfo& meta_data_hash_data_info) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+
+ // Get the base storage size.
+ s64 base_size = base_storage->GetSize();
+
+ // Get the meta extents.
+ const auto meta_offset = sparse_info.bucket.offset;
+ const auto meta_size = sparse_info.bucket.size;
+ R_UNLESS(meta_offset + meta_size - offset <= base_size, ResultNcaBaseStorageOutOfRangeB);
+
+ // Get the meta data hash data extents.
+ const s64 meta_data_hash_data_offset = meta_data_hash_data_info.offset;
+ const s64 meta_data_hash_data_size =
+ Common::AlignUp<s64>(meta_data_hash_data_info.size, NcaHeader::CtrBlockSize);
+ R_UNLESS(meta_data_hash_data_offset + meta_data_hash_data_size <= base_size,
+ ResultNcaBaseStorageOutOfRangeB);
+
+ // Check that the meta is before the hash data.
+ R_UNLESS(meta_offset + meta_size <= meta_data_hash_data_offset,
+ ResultRomNcaInvalidSparseMetaDataHashDataOffset);
+
+ // Check that offsets are appropriately aligned.
+ R_UNLESS(Common::IsAligned<s64>(meta_data_hash_data_offset, NcaHeader::CtrBlockSize),
+ ResultRomNcaInvalidSparseMetaDataHashDataOffset);
+ R_UNLESS(Common::IsAligned<s64>(meta_offset, NcaHeader::CtrBlockSize),
+ ResultInvalidNcaFsHeader);
+
+ // Create the meta storage.
+ auto enc_storage = std::make_shared<OffsetVfsFile>(
+ std::move(base_storage),
+ meta_data_hash_data_offset + meta_data_hash_data_size - meta_offset, meta_offset);
+ R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create the decrypted storage.
+ VirtualFile decrypted_storage;
+ R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
+ offset + meta_offset, sparse_info.MakeAesCtrUpperIv(upper_iv),
+ AlignmentStorageRequirement::None));
+
+ // Create the verification storage.
+ VirtualFile integrity_storage;
+ Result rc = this->CreateIntegrityVerificationStorageForMeta(
+ std::addressof(integrity_storage), out_layer_info_storage, std::move(decrypted_storage),
+ meta_offset, meta_data_hash_data_info);
+ if (rc == ResultInvalidNcaMetaDataHashDataSize) {
+ R_THROW(ResultRomNcaInvalidSparseMetaDataHashDataSize);
+ }
+ if (rc == ResultInvalidNcaMetaDataHashDataHash) {
+ R_THROW(ResultRomNcaInvalidSparseMetaDataHashDataHash);
+ }
+ R_TRY(rc);
+
+ // Create the meta storage.
+ auto meta_storage = std::make_shared<OffsetVfsFile>(std::move(integrity_storage), meta_size, 0);
+ R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the output.
+ *out = std::move(meta_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateSparseStorageWithVerification(
+ VirtualFile* out, s64* out_fs_data_offset, std::shared_ptr<SparseStorage>* out_sparse_storage,
+ VirtualFile* out_meta_storage, VirtualFile* out_layer_info_storage, s32 index,
+ const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
+ const NcaMetaDataHashDataInfo& meta_data_hash_data_info,
+ NcaFsHeader::MetaDataHashType meta_data_hash_type) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(out_fs_data_offset != nullptr);
+
+ // Check the sparse info generation.
+ R_UNLESS(sparse_info.generation != 0, ResultInvalidNcaHeader);
+
+ // Read and verify the bucket tree header.
+ BucketTree::Header header;
+ std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header));
+ R_TRY(header.Verify());
+
+ // Determine the storage extents.
+ const auto fs_offset = GetFsOffset(*m_reader, index);
+ const auto fs_end_offset = GetFsEndOffset(*m_reader, index);
+ const auto fs_size = fs_end_offset - fs_offset;
+
+ // Create the sparse storage.
+ std::shared_ptr<SparseStorage> sparse_storage;
+ if (header.entry_count != 0) {
+ // Create the body substorage.
+ VirtualFile body_substorage;
+ R_TRY(this->CreateBodySubStorage(
+ std::addressof(body_substorage), sparse_info.physical_offset,
+ Common::AlignUp<s64>(static_cast<s64>(meta_data_hash_data_info.offset) +
+ static_cast<s64>(meta_data_hash_data_info.size),
+ NcaHeader::CtrBlockSize)));
+
+ // Check the meta data hash type.
+ R_UNLESS(meta_data_hash_type == NcaFsHeader::MetaDataHashType::HierarchicalIntegrity,
+ ResultRomNcaInvalidSparseMetaDataHashType);
+
+ // Create the meta storage.
+ VirtualFile meta_storage;
+ R_TRY(this->CreateSparseStorageMetaStorageWithVerification(
+ std::addressof(meta_storage), out_layer_info_storage, body_substorage,
+ sparse_info.physical_offset, upper_iv, sparse_info, meta_data_hash_data_info));
+
+ // Potentially set the output meta storage.
+ if (out_meta_storage != nullptr) {
+ *out_meta_storage = meta_storage;
+ }
+
+ // Create the sparse storage.
+ R_TRY(this->CreateSparseStorageCore(std::addressof(sparse_storage), body_substorage,
+ sparse_info.GetPhysicalSize(), std::move(meta_storage),
+ sparse_info, false));
+ } else {
+ // If there are no entries, there's nothing to actually do.
+ sparse_storage = std::make_shared<SparseStorage>();
+ R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ sparse_storage->Initialize(fs_size);
+ }
+
+ // Potentially set the output sparse storage.
+ if (out_sparse_storage != nullptr) {
+ *out_sparse_storage = sparse_storage;
+ }
+
+ // Set the output fs data offset.
+ *out_fs_data_offset = fs_offset;
+
+ // Set the output storage.
+ *out = std::move(sparse_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateAesCtrExStorageMetaStorage(
+ VirtualFile* out, VirtualFile base_storage, s64 offset,
+ NcaFsHeader::EncryptionType encryption_type, const NcaAesCtrUpperIv& upper_iv,
+ const NcaPatchInfo& patch_info) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+ ASSERT(encryption_type == NcaFsHeader::EncryptionType::None ||
+ encryption_type == NcaFsHeader::EncryptionType::AesCtrEx ||
+ encryption_type == NcaFsHeader::EncryptionType::AesCtrExSkipLayerHash);
+ ASSERT(patch_info.HasAesCtrExTable());
+
+ // Validate patch info extents.
+ R_UNLESS(patch_info.indirect_size > 0, ResultInvalidNcaPatchInfoIndirectSize);
+ R_UNLESS(patch_info.aes_ctr_ex_size > 0, ResultInvalidNcaPatchInfoAesCtrExSize);
+ R_UNLESS(patch_info.indirect_size + patch_info.indirect_offset <= patch_info.aes_ctr_ex_offset,
+ ResultInvalidNcaPatchInfoAesCtrExOffset);
+
+ // Get the base storage size.
+ s64 base_size = base_storage->GetSize();
+
+ // Get and validate the meta extents.
+ const s64 meta_offset = patch_info.aes_ctr_ex_offset;
+ const s64 meta_size =
+ Common::AlignUp(static_cast<s64>(patch_info.aes_ctr_ex_size), NcaHeader::XtsBlockSize);
+ R_UNLESS(meta_offset + meta_size <= base_size, ResultNcaBaseStorageOutOfRangeB);
+
+ // Create the encrypted storage.
+ auto enc_storage =
+ std::make_shared<OffsetVfsFile>(std::move(base_storage), meta_size, meta_offset);
+ R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create the decrypted storage.
+ VirtualFile decrypted_storage;
+ if (encryption_type != NcaFsHeader::EncryptionType::None) {
+ R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
+ offset + meta_offset, upper_iv,
+ AlignmentStorageRequirement::None));
+ } else {
+ // If encryption type is none, don't do any decryption.
+ decrypted_storage = std::move(enc_storage);
+ }
+
+ // Create meta storage.
+ auto meta_storage = std::make_shared<OffsetVfsFile>(decrypted_storage, meta_size, 0);
+ R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create buffered storage.
+ std::vector<u8> meta_data(meta_size);
+ meta_storage->Read(meta_data.data(), meta_size, 0);
+
+ auto buffered_storage = std::make_shared<VectorVfsFile>(std::move(meta_data));
+ R_UNLESS(buffered_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the output.
+ *out = std::move(buffered_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateAesCtrExStorage(
+ VirtualFile* out, std::shared_ptr<AesCtrCounterExtendedStorage>* out_ext,
+ VirtualFile base_storage, VirtualFile meta_storage, s64 counter_offset,
+ const NcaAesCtrUpperIv& upper_iv, const NcaPatchInfo& patch_info) {
+ // Validate pre-conditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+ ASSERT(meta_storage != nullptr);
+ ASSERT(patch_info.HasAesCtrExTable());
+
+ // Read the bucket tree header.
+ BucketTree::Header header;
+ std::memcpy(std::addressof(header), patch_info.aes_ctr_ex_header.data(), sizeof(header));
+ R_TRY(header.Verify());
+
+ // Determine the bucket extents.
+ const auto entry_count = header.entry_count;
+ const s64 data_offset = 0;
+ const s64 data_size = patch_info.aes_ctr_ex_offset;
+ const s64 node_offset = 0;
+ const s64 node_size = AesCtrCounterExtendedStorage::QueryNodeStorageSize(entry_count);
+ const s64 entry_offset = node_offset + node_size;
+ const s64 entry_size = AesCtrCounterExtendedStorage::QueryEntryStorageSize(entry_count);
+
+ // Create bucket storages.
+ auto data_storage =
+ std::make_shared<OffsetVfsFile>(std::move(base_storage), data_size, data_offset);
+ auto node_storage = std::make_shared<OffsetVfsFile>(meta_storage, node_size, node_offset);
+ auto entry_storage = std::make_shared<OffsetVfsFile>(meta_storage, entry_size, entry_offset);
+
+ // Get the secure value.
+ const auto secure_value = upper_iv.part.secure_value;
+
+ // Create the aes ctr ex storage.
+ VirtualFile aes_ctr_ex_storage;
+ if (m_reader->HasExternalDecryptionKey()) {
+ // Create the decryptor.
+ std::unique_ptr<AesCtrCounterExtendedStorage::IDecryptor> decryptor;
+ R_TRY(AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::addressof(decryptor)));
+
+ // Create the aes ctr ex storage.
+ auto impl_storage = std::make_shared<AesCtrCounterExtendedStorage>();
+ R_UNLESS(impl_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Initialize the aes ctr ex storage.
+ R_TRY(impl_storage->Initialize(m_reader->GetExternalDecryptionKey(), AesCtrStorage::KeySize,
+ secure_value, counter_offset, data_storage, node_storage,
+ entry_storage, entry_count, std::move(decryptor)));
+
+ // Potentially set the output implementation storage.
+ if (out_ext != nullptr) {
+ *out_ext = impl_storage;
+ }
+
+ // Set the implementation storage.
+ aes_ctr_ex_storage = std::move(impl_storage);
+ } else {
+ // Create the software decryptor.
+ std::unique_ptr<AesCtrCounterExtendedStorage::IDecryptor> sw_decryptor;
+ R_TRY(AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::addressof(sw_decryptor)));
+
+ // Make the software storage.
+ auto sw_storage = std::make_shared<AesCtrCounterExtendedStorage>();
+ R_UNLESS(sw_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Initialize the software storage.
+ R_TRY(sw_storage->Initialize(m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtr),
+ AesCtrStorage::KeySize, secure_value, counter_offset,
+ data_storage, node_storage, entry_storage, entry_count,
+ std::move(sw_decryptor)));
+
+ // Potentially set the output implementation storage.
+ if (out_ext != nullptr) {
+ *out_ext = sw_storage;
+ }
+
+ // Set the implementation storage.
+ aes_ctr_ex_storage = std::move(sw_storage);
+ }
+
+ // Create an alignment-matching storage.
+ using AlignedStorage = AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>;
+ auto aligned_storage = std::make_shared<AlignedStorage>(std::move(aes_ctr_ex_storage));
+ R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the output.
+ *out = std::move(aligned_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateIndirectStorageMetaStorage(VirtualFile* out,
+ VirtualFile base_storage,
+ const NcaPatchInfo& patch_info) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+ ASSERT(patch_info.HasIndirectTable());
+
+ // Get the base storage size.
+ s64 base_size = base_storage->GetSize();
+
+ // Check that we're within range.
+ R_UNLESS(patch_info.indirect_offset + patch_info.indirect_size <= base_size,
+ ResultNcaBaseStorageOutOfRangeE);
+
+ // Create the meta storage.
+ auto meta_storage = std::make_shared<OffsetVfsFile>(base_storage, patch_info.indirect_size,
+ patch_info.indirect_offset);
+ R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create buffered storage.
+ std::vector<u8> meta_data(patch_info.indirect_size);
+ meta_storage->Read(meta_data.data(), patch_info.indirect_size, 0);
+
+ auto buffered_storage = std::make_shared<VectorVfsFile>(std::move(meta_data));
+ R_UNLESS(buffered_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the output.
+ *out = std::move(buffered_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateIndirectStorage(
+ VirtualFile* out, std::shared_ptr<IndirectStorage>* out_ind, VirtualFile base_storage,
+ VirtualFile original_data_storage, VirtualFile meta_storage, const NcaPatchInfo& patch_info) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+ ASSERT(meta_storage != nullptr);
+ ASSERT(patch_info.HasIndirectTable());
+
+ // Read the bucket tree header.
+ BucketTree::Header header;
+ std::memcpy(std::addressof(header), patch_info.indirect_header.data(), sizeof(header));
+ R_TRY(header.Verify());
+
+ // Determine the storage sizes.
+ const auto node_size = IndirectStorage::QueryNodeStorageSize(header.entry_count);
+ const auto entry_size = IndirectStorage::QueryEntryStorageSize(header.entry_count);
+ R_UNLESS(node_size + entry_size <= patch_info.indirect_size,
+ ResultInvalidNcaIndirectStorageOutOfRange);
+
+ // Get the indirect data size.
+ const s64 indirect_data_size = patch_info.indirect_offset;
+ ASSERT(Common::IsAligned(indirect_data_size, NcaHeader::XtsBlockSize));
+
+ // Create the indirect data storage.
+ auto indirect_data_storage =
+ std::make_shared<OffsetVfsFile>(base_storage, indirect_data_size, 0);
+ R_UNLESS(indirect_data_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create the indirect storage.
+ auto indirect_storage = std::make_shared<IndirectStorage>();
+ R_UNLESS(indirect_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Initialize the indirect storage.
+ R_TRY(indirect_storage->Initialize(
+ std::make_shared<OffsetVfsFile>(meta_storage, node_size, 0),
+ std::make_shared<OffsetVfsFile>(meta_storage, entry_size, node_size), header.entry_count));
+
+ // Get the original data size.
+ s64 original_data_size = original_data_storage->GetSize();
+
+ // Set the indirect storages.
+ indirect_storage->SetStorage(
+ 0, std::make_shared<OffsetVfsFile>(original_data_storage, original_data_size, 0));
+ indirect_storage->SetStorage(
+ 1, std::make_shared<OffsetVfsFile>(indirect_data_storage, indirect_data_size, 0));
+
+ // If necessary, set the output indirect storage.
+ if (out_ind != nullptr) {
+ *out_ind = indirect_storage;
+ }
+
+ // Set the output.
+ *out = std::move(indirect_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreatePatchMetaStorage(
+ VirtualFile* out_aes_ctr_ex_meta, VirtualFile* out_indirect_meta,
+ VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset,
+ const NcaAesCtrUpperIv& upper_iv, const NcaPatchInfo& patch_info,
+ const NcaMetaDataHashDataInfo& meta_data_hash_data_info) {
+ // Validate preconditions.
+ ASSERT(out_aes_ctr_ex_meta != nullptr);
+ ASSERT(out_indirect_meta != nullptr);
+ ASSERT(base_storage != nullptr);
+ ASSERT(patch_info.HasAesCtrExTable());
+ ASSERT(patch_info.HasIndirectTable());
+ ASSERT(Common::IsAligned<s64>(patch_info.aes_ctr_ex_size, NcaHeader::XtsBlockSize));
+
+ // Validate patch info extents.
+ R_UNLESS(patch_info.indirect_size > 0, ResultInvalidNcaPatchInfoIndirectSize);
+ R_UNLESS(patch_info.aes_ctr_ex_size >= 0, ResultInvalidNcaPatchInfoAesCtrExSize);
+ R_UNLESS(patch_info.indirect_size + patch_info.indirect_offset <= patch_info.aes_ctr_ex_offset,
+ ResultInvalidNcaPatchInfoAesCtrExOffset);
+ R_UNLESS(patch_info.aes_ctr_ex_offset + patch_info.aes_ctr_ex_size <=
+ meta_data_hash_data_info.offset,
+ ResultRomNcaInvalidPatchMetaDataHashDataOffset);
+
+ // Get the base storage size.
+ s64 base_size = base_storage->GetSize();
+
+ // Check that extents remain within range.
+ R_UNLESS(patch_info.indirect_offset + patch_info.indirect_size <= base_size,
+ ResultNcaBaseStorageOutOfRangeE);
+ R_UNLESS(patch_info.aes_ctr_ex_offset + patch_info.aes_ctr_ex_size <= base_size,
+ ResultNcaBaseStorageOutOfRangeB);
+
+ // Check that metadata hash data extents remain within range.
+ const s64 meta_data_hash_data_offset = meta_data_hash_data_info.offset;
+ const s64 meta_data_hash_data_size =
+ Common::AlignUp<s64>(meta_data_hash_data_info.size, NcaHeader::CtrBlockSize);
+ R_UNLESS(meta_data_hash_data_offset + meta_data_hash_data_size <= base_size,
+ ResultNcaBaseStorageOutOfRangeB);
+
+ // Create the encrypted storage.
+ auto enc_storage = std::make_shared<OffsetVfsFile>(
+ std::move(base_storage),
+ meta_data_hash_data_offset + meta_data_hash_data_size - patch_info.indirect_offset,
+ patch_info.indirect_offset);
+ R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create the decrypted storage.
+ VirtualFile decrypted_storage;
+ R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
+ offset + patch_info.indirect_offset, upper_iv,
+ AlignmentStorageRequirement::None));
+
+ // Create the verification storage.
+ VirtualFile integrity_storage;
+ Result rc = this->CreateIntegrityVerificationStorageForMeta(
+ std::addressof(integrity_storage), out_layer_info_storage, std::move(decrypted_storage),
+ patch_info.indirect_offset, meta_data_hash_data_info);
+ if (rc == ResultInvalidNcaMetaDataHashDataSize) {
+ R_THROW(ResultRomNcaInvalidPatchMetaDataHashDataSize);
+ }
+ if (rc == ResultInvalidNcaMetaDataHashDataHash) {
+ R_THROW(ResultRomNcaInvalidPatchMetaDataHashDataHash);
+ }
+ R_TRY(rc);
+
+ // Create the indirect meta storage.
+ auto indirect_meta_storage =
+ std::make_shared<OffsetVfsFile>(integrity_storage, patch_info.indirect_size,
+ patch_info.indirect_offset - patch_info.indirect_offset);
+ R_UNLESS(indirect_meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create the aes ctr ex meta storage.
+ auto aes_ctr_ex_meta_storage =
+ std::make_shared<OffsetVfsFile>(integrity_storage, patch_info.aes_ctr_ex_size,
+ patch_info.aes_ctr_ex_offset - patch_info.indirect_offset);
+ R_UNLESS(aes_ctr_ex_meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the output.
+ *out_aes_ctr_ex_meta = std::move(aes_ctr_ex_meta_storage);
+ *out_indirect_meta = std::move(indirect_meta_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateSha256Storage(
+ VirtualFile* out, VirtualFile base_storage,
+ const NcaFsHeader::HashData::HierarchicalSha256Data& hash_data) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+
+ // Define storage types.
+ using VerificationStorage = HierarchicalSha256Storage;
+
+ // Validate the hash data.
+ R_UNLESS(Common::IsPowerOfTwo(hash_data.hash_block_size),
+ ResultInvalidHierarchicalSha256BlockSize);
+ R_UNLESS(hash_data.hash_layer_count == VerificationStorage::LayerCount - 1,
+ ResultInvalidHierarchicalSha256LayerCount);
+
+ // Get the regions.
+ const auto& hash_region = hash_data.hash_layer_region[0];
+ const auto& data_region = hash_data.hash_layer_region[1];
+
+ // Determine buffer sizes.
+ constexpr s32 CacheBlockCount = 2;
+ const auto hash_buffer_size = static_cast<size_t>(hash_region.size);
+ const auto cache_buffer_size = CacheBlockCount * hash_data.hash_block_size;
+ const auto total_buffer_size = hash_buffer_size + cache_buffer_size;
+
+ // Make a buffer holder storage.
+ auto buffer_hold_storage = std::make_shared<MemoryResourceBufferHoldStorage>(
+ std::move(base_storage), total_buffer_size);
+ R_UNLESS(buffer_hold_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+ R_UNLESS(buffer_hold_storage->IsValid(), ResultAllocationMemoryFailedInNcaFileSystemDriverI);
+
+ // Get storage size.
+ s64 base_size = buffer_hold_storage->GetSize();
+
+ // Check that we're within range.
+ R_UNLESS(hash_region.offset + hash_region.size <= base_size, ResultNcaBaseStorageOutOfRangeC);
+ R_UNLESS(data_region.offset + data_region.size <= base_size, ResultNcaBaseStorageOutOfRangeC);
+
+ // Create the master hash storage.
+ auto master_hash_storage =
+ std::make_shared<ArrayVfsFile<sizeof(Hash)>>(hash_data.fs_data_master_hash.value);
+
+ // Make the verification storage.
+ auto verification_storage = std::make_shared<VerificationStorage>();
+ R_UNLESS(verification_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Make layer storages.
+ std::array<VirtualFile, VerificationStorage::LayerCount> layer_storages{
+ std::make_shared<OffsetVfsFile>(master_hash_storage, sizeof(Hash), 0),
+ std::make_shared<OffsetVfsFile>(buffer_hold_storage, hash_region.size, hash_region.offset),
+ std::make_shared<OffsetVfsFile>(buffer_hold_storage, data_region.size, data_region.offset),
+ };
+
+ // Initialize the verification storage.
+ R_TRY(verification_storage->Initialize(layer_storages.data(), VerificationStorage::LayerCount,
+ hash_data.hash_block_size,
+ buffer_hold_storage->GetBuffer(), hash_buffer_size));
+
+ // Set the output.
+ *out = std::move(verification_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateIntegrityVerificationStorage(
+ VirtualFile* out, VirtualFile base_storage,
+ const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info) {
+ R_RETURN(this->CreateIntegrityVerificationStorageImpl(
+ out, base_storage, meta_info, 0, IntegrityDataCacheCount, IntegrityHashCacheCount,
+ HierarchicalIntegrityVerificationStorage::GetDefaultDataCacheBufferLevel(
+ meta_info.level_hash_info.max_layers)));
+}
+
+Result NcaFileSystemDriver::CreateIntegrityVerificationStorageForMeta(
+ VirtualFile* out, VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset,
+ const NcaMetaDataHashDataInfo& meta_data_hash_data_info) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+
+ // Check the meta data hash data size.
+ R_UNLESS(meta_data_hash_data_info.size == sizeof(NcaMetaDataHashData),
+ ResultInvalidNcaMetaDataHashDataSize);
+
+ // Read the meta data hash data.
+ NcaMetaDataHashData meta_data_hash_data;
+ base_storage->ReadObject(std::addressof(meta_data_hash_data),
+ meta_data_hash_data_info.offset - offset);
+
+ // Set the out layer info storage, if necessary.
+ if (out_layer_info_storage != nullptr) {
+ auto layer_info_storage = std::make_shared<OffsetVfsFile>(
+ base_storage,
+ meta_data_hash_data_info.offset + meta_data_hash_data_info.size -
+ meta_data_hash_data.layer_info_offset,
+ meta_data_hash_data.layer_info_offset - offset);
+ R_UNLESS(layer_info_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ *out_layer_info_storage = std::move(layer_info_storage);
+ }
+
+ // Create the meta storage.
+ auto meta_storage = std::make_shared<OffsetVfsFile>(
+ std::move(base_storage), meta_data_hash_data_info.offset - offset, 0);
+ R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Create the integrity verification storage.
+ R_RETURN(this->CreateIntegrityVerificationStorageImpl(
+ out, std::move(meta_storage), meta_data_hash_data.integrity_meta_info,
+ meta_data_hash_data.layer_info_offset - offset, IntegrityDataCacheCountForMeta,
+ IntegrityHashCacheCountForMeta, 0));
+}
+
+Result NcaFileSystemDriver::CreateIntegrityVerificationStorageImpl(
+ VirtualFile* out, VirtualFile base_storage,
+ const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info, s64 layer_info_offset,
+ int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level) {
+ // Validate preconditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+ ASSERT(layer_info_offset >= 0);
+
+ // Define storage types.
+ using VerificationStorage = HierarchicalIntegrityVerificationStorage;
+ using StorageInfo = VerificationStorage::HierarchicalStorageInformation;
+
+ // Validate the meta info.
+ HierarchicalIntegrityVerificationInformation level_hash_info;
+ std::memcpy(std::addressof(level_hash_info), std::addressof(meta_info.level_hash_info),
+ sizeof(level_hash_info));
+
+ R_UNLESS(IntegrityMinLayerCount <= level_hash_info.max_layers,
+ ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount);
+ R_UNLESS(level_hash_info.max_layers <= IntegrityMaxLayerCount,
+ ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount);
+
+ // Get the base storage size.
+ s64 base_storage_size = base_storage->GetSize();
+
+ // Create storage info.
+ StorageInfo storage_info;
+ for (s32 i = 0; i < static_cast<s32>(level_hash_info.max_layers - 2); ++i) {
+ const auto& layer_info = level_hash_info.info[i];
+ R_UNLESS(layer_info_offset + layer_info.offset + layer_info.size <= base_storage_size,
+ ResultNcaBaseStorageOutOfRangeD);
+
+ storage_info[i + 1] = std::make_shared<OffsetVfsFile>(
+ base_storage, layer_info.size, layer_info_offset + layer_info.offset);
+ }
+
+ // Set the last layer info.
+ const auto& layer_info = level_hash_info.info[level_hash_info.max_layers - 2];
+ const s64 last_layer_info_offset = layer_info_offset > 0 ? 0LL : layer_info.offset.Get();
+ R_UNLESS(last_layer_info_offset + layer_info.size <= base_storage_size,
+ ResultNcaBaseStorageOutOfRangeD);
+ if (layer_info_offset > 0) {
+ R_UNLESS(last_layer_info_offset + layer_info.size <= layer_info_offset,
+ ResultRomNcaInvalidIntegrityLayerInfoOffset);
+ }
+ storage_info.SetDataStorage(std::make_shared<OffsetVfsFile>(
+ std::move(base_storage), layer_info.size, last_layer_info_offset));
+
+ // Make the integrity romfs storage.
+ auto integrity_storage = std::make_shared<IntegrityRomFsStorage>();
+ R_UNLESS(integrity_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Initialize the integrity storage.
+ R_TRY(integrity_storage->Initialize(level_hash_info, meta_info.master_hash, storage_info,
+ max_data_cache_entries, max_hash_cache_entries,
+ buffer_level));
+
+ // Set the output.
+ *out = std::move(integrity_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateRegionSwitchStorage(VirtualFile* out,
+ const NcaFsHeaderReader* header_reader,
+ VirtualFile inside_storage,
+ VirtualFile outside_storage) {
+ // Check pre-conditions.
+ ASSERT(header_reader->GetHashType() == NcaFsHeader::HashType::HierarchicalIntegrityHash);
+
+ // Create the region.
+ RegionSwitchStorage::Region region = {};
+ R_TRY(header_reader->GetHashTargetOffset(std::addressof(region.size)));
+
+ // Create the region switch storage.
+ auto region_switch_storage = std::make_shared<RegionSwitchStorage>(
+ std::move(inside_storage), std::move(outside_storage), region);
+ R_UNLESS(region_switch_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Set the output.
+ *out = std::move(region_switch_storage);
+ R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateCompressedStorage(VirtualFile* out,
+ std::shared_ptr<CompressedStorage>* out_cmp,
+ VirtualFile* out_meta, VirtualFile base_storage,
+ const NcaCompressionInfo& compression_info) {
+ R_RETURN(this->CreateCompressedStorage(out, out_cmp, out_meta, std::move(base_storage),
+ compression_info, m_reader->GetDecompressor()));
+}
+
+Result NcaFileSystemDriver::CreateCompressedStorage(VirtualFile* out,
+ std::shared_ptr<CompressedStorage>* out_cmp,
+ VirtualFile* out_meta, VirtualFile base_storage,
+ const NcaCompressionInfo& compression_info,
+ GetDecompressorFunction get_decompressor) {
+ // Check pre-conditions.
+ ASSERT(out != nullptr);
+ ASSERT(base_storage != nullptr);
+ ASSERT(get_decompressor != nullptr);
+
+ // Read and verify the bucket tree header.
+ BucketTree::Header header;
+ std::memcpy(std::addressof(header), compression_info.bucket.header.data(), sizeof(header));
+ R_TRY(header.Verify());
+
+ // Determine the storage extents.
+ const auto table_offset = compression_info.bucket.offset;
+ const auto table_size = compression_info.bucket.size;
+ const auto node_size = CompressedStorage::QueryNodeStorageSize(header.entry_count);
+ const auto entry_size = CompressedStorage::QueryEntryStorageSize(header.entry_count);
+ R_UNLESS(node_size + entry_size <= table_size, ResultInvalidCompressedStorageSize);
+
+ // If we should, set the output meta storage.
+ if (out_meta != nullptr) {
+ auto meta_storage = std::make_shared<OffsetVfsFile>(base_storage, table_size, table_offset);
+ R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ *out_meta = std::move(meta_storage);
+ }
+
+ // Allocate the compressed storage.
+ auto compressed_storage = std::make_shared<CompressedStorage>();
+ R_UNLESS(compressed_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+ // Initialize the compressed storage.
+ R_TRY(compressed_storage->Initialize(
+ std::make_shared<OffsetVfsFile>(base_storage, table_offset, 0),
+ std::make_shared<OffsetVfsFile>(base_storage, node_size, table_offset),
+ std::make_shared<OffsetVfsFile>(base_storage, entry_size, table_offset + node_size),
+ header.entry_count, 64_KiB, 640_KiB, get_decompressor, 16_KiB, 16_KiB, 32));
+
+ // Potentially set the output compressed storage.
+ if (out_cmp) {
+ *out_cmp = compressed_storage;
+ }
+
+ // Set the output.
+ *out = std::move(compressed_storage);
+ R_SUCCEED();
+}
+
+} // namespace FileSys
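The meta-storage creators in this file (CreateIndirectStorageMetaStorage above, and the AES-CTR-EX equivalent declared in the header) share one pattern: slice the table region out of the base storage, read it fully into memory, and wrap it in a VectorVfsFile so later bucket-tree lookups never touch the underlying storage again. A minimal sketch of that pattern follows; the helper name and free-standing form are illustrative only, but the Read and VectorVfsFile signatures match the calls used above.

std::shared_ptr<VectorVfsFile> MaterializeMetaRegion(VirtualFile base, s64 offset, s64 size) {
    // Read the whole table region into a memory-backed buffer.
    std::vector<u8> data(static_cast<size_t>(size));
    base->Read(data.data(), data.size(), offset);

    // Hand back an in-memory file over that buffer.
    return std::make_shared<VectorVfsFile>(std::move(data));
}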
diff --git a/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h
new file mode 100644
index 000000000..5771a21fc
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h
@@ -0,0 +1,364 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fssystem_compression_common.h"
+#include "core/file_sys/fssystem/fssystem_nca_header.h"
+#include "core/file_sys/vfs.h"
+
+namespace FileSys {
+
+class CompressedStorage;
+class AesCtrCounterExtendedStorage;
+class IndirectStorage;
+class SparseStorage;
+
+struct NcaCryptoConfiguration;
+
+using KeyGenerationFunction = void (*)(void* dst_key, size_t dst_key_size, const void* src_key,
+ size_t src_key_size, s32 key_type);
+using VerifySign1Function = bool (*)(const void* sig, size_t sig_size, const void* data,
+ size_t data_size, u8 generation);
+
+struct NcaCryptoConfiguration {
+ static constexpr size_t Rsa2048KeyModulusSize = 2048 / 8;
+ static constexpr size_t Rsa2048KeyPublicExponentSize = 3;
+ static constexpr size_t Rsa2048KeyPrivateExponentSize = Rsa2048KeyModulusSize;
+
+ static constexpr size_t Aes128KeySize = 128 / 8;
+
+ static constexpr size_t Header1SignatureKeyGenerationMax = 1;
+
+ static constexpr s32 KeyAreaEncryptionKeyIndexCount = 3;
+ static constexpr s32 HeaderEncryptionKeyCount = 2;
+
+ static constexpr u8 KeyAreaEncryptionKeyIndexZeroKey = 0xFF;
+
+ static constexpr size_t KeyGenerationMax = 32;
+
+ std::array<const u8*, Header1SignatureKeyGenerationMax + 1> header_1_sign_key_moduli;
+ std::array<u8, Rsa2048KeyPublicExponentSize> header_1_sign_key_public_exponent;
+ std::array<std::array<u8, Aes128KeySize>, KeyAreaEncryptionKeyIndexCount>
+ key_area_encryption_key_source;
+ std::array<u8, Aes128KeySize> header_encryption_key_source;
+ std::array<std::array<u8, Aes128KeySize>, HeaderEncryptionKeyCount>
+ header_encrypted_encryption_keys;
+ KeyGenerationFunction generate_key;
+ VerifySign1Function verify_sign1;
+ bool is_plaintext_header_available;
+ bool is_available_sw_key;
+};
+static_assert(std::is_trivial_v<NcaCryptoConfiguration>);
+
+struct NcaCompressionConfiguration {
+ GetDecompressorFunction get_decompressor;
+};
+static_assert(std::is_trivial_v<NcaCompressionConfiguration>);
+
+constexpr inline s32 KeyAreaEncryptionKeyCount =
+ NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount *
+ NcaCryptoConfiguration::KeyGenerationMax;
+
+enum class KeyType : s32 {
+ ZeroKey = -2,
+ InvalidKey = -1,
+ NcaHeaderKey1 = KeyAreaEncryptionKeyCount + 0,
+ NcaHeaderKey2 = KeyAreaEncryptionKeyCount + 1,
+ NcaExternalKey = KeyAreaEncryptionKeyCount + 2,
+ SaveDataDeviceUniqueMac = KeyAreaEncryptionKeyCount + 3,
+ SaveDataSeedUniqueMac = KeyAreaEncryptionKeyCount + 4,
+ SaveDataTransferMac = KeyAreaEncryptionKeyCount + 5,
+};
+
+constexpr inline bool IsInvalidKeyTypeValue(s32 key_type) {
+ return key_type < 0;
+}
+
+constexpr inline s32 GetKeyTypeValue(u8 key_index, u8 key_generation) {
+ if (key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey) {
+ return static_cast<s32>(KeyType::ZeroKey);
+ }
+
+ if (key_index >= NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount) {
+ return static_cast<s32>(KeyType::InvalidKey);
+ }
+
+ return NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount * key_generation + key_index;
+}
+
+class NcaReader {
+ YUZU_NON_COPYABLE(NcaReader);
+ YUZU_NON_MOVEABLE(NcaReader);
+
+public:
+ NcaReader();
+ ~NcaReader();
+
+ Result Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg,
+ const NcaCompressionConfiguration& compression_cfg);
+
+ VirtualFile GetSharedBodyStorage();
+ u32 GetMagic() const;
+ NcaHeader::DistributionType GetDistributionType() const;
+ NcaHeader::ContentType GetContentType() const;
+ u8 GetHeaderSign1KeyGeneration() const;
+ u8 GetKeyGeneration() const;
+ u8 GetKeyIndex() const;
+ u64 GetContentSize() const;
+ u64 GetProgramId() const;
+ u32 GetContentIndex() const;
+ u32 GetSdkAddonVersion() const;
+ void GetRightsId(u8* dst, size_t dst_size) const;
+ bool HasFsInfo(s32 index) const;
+ s32 GetFsCount() const;
+ const Hash& GetFsHeaderHash(s32 index) const;
+ void GetFsHeaderHash(Hash* dst, s32 index) const;
+ void GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const;
+ u64 GetFsOffset(s32 index) const;
+ u64 GetFsEndOffset(s32 index) const;
+ u64 GetFsSize(s32 index) const;
+ void GetEncryptedKey(void* dst, size_t size) const;
+ const void* GetDecryptionKey(s32 index) const;
+ bool HasValidInternalKey() const;
+ bool HasInternalDecryptionKeyForAesHw() const;
+ bool IsSoftwareAesPrioritized() const;
+ void PrioritizeSoftwareAes();
+ bool IsAvailableSwKey() const;
+ bool HasExternalDecryptionKey() const;
+ const void* GetExternalDecryptionKey() const;
+ void SetExternalDecryptionKey(const void* src, size_t size);
+ void GetRawData(void* dst, size_t dst_size) const;
+ NcaHeader::EncryptionType GetEncryptionType() const;
+ Result ReadHeader(NcaFsHeader* dst, s32 index) const;
+
+ GetDecompressorFunction GetDecompressor() const;
+
+ bool GetHeaderSign1Valid() const;
+
+ void GetHeaderSign2(void* dst, size_t size) const;
+
+private:
+ NcaHeader m_header;
+ std::array<std::array<u8, NcaCryptoConfiguration::Aes128KeySize>,
+ NcaHeader::DecryptionKey_Count>
+ m_decryption_keys;
+ VirtualFile m_body_storage;
+ VirtualFile m_header_storage;
+ std::array<u8, NcaCryptoConfiguration::Aes128KeySize> m_external_decryption_key;
+ bool m_is_software_aes_prioritized;
+ bool m_is_available_sw_key;
+ NcaHeader::EncryptionType m_header_encryption_type;
+ bool m_is_header_sign1_signature_valid;
+ GetDecompressorFunction m_get_decompressor;
+};
+
+class NcaFsHeaderReader {
+ YUZU_NON_COPYABLE(NcaFsHeaderReader);
+ YUZU_NON_MOVEABLE(NcaFsHeaderReader);
+
+public:
+ NcaFsHeaderReader() : m_fs_index(-1) {
+ std::memset(std::addressof(m_data), 0, sizeof(m_data));
+ }
+
+ Result Initialize(const NcaReader& reader, s32 index);
+ bool IsInitialized() const {
+ return m_fs_index >= 0;
+ }
+
+ void GetRawData(void* dst, size_t dst_size) const;
+
+ NcaFsHeader::HashData& GetHashData();
+ const NcaFsHeader::HashData& GetHashData() const;
+ u16 GetVersion() const;
+ s32 GetFsIndex() const;
+ NcaFsHeader::FsType GetFsType() const;
+ NcaFsHeader::HashType GetHashType() const;
+ NcaFsHeader::EncryptionType GetEncryptionType() const;
+ NcaPatchInfo& GetPatchInfo();
+ const NcaPatchInfo& GetPatchInfo() const;
+ const NcaAesCtrUpperIv GetAesCtrUpperIv() const;
+
+ bool IsSkipLayerHashEncryption() const;
+ Result GetHashTargetOffset(s64* out) const;
+
+ bool ExistsSparseLayer() const;
+ NcaSparseInfo& GetSparseInfo();
+ const NcaSparseInfo& GetSparseInfo() const;
+
+ bool ExistsCompressionLayer() const;
+ NcaCompressionInfo& GetCompressionInfo();
+ const NcaCompressionInfo& GetCompressionInfo() const;
+
+ bool ExistsPatchMetaHashLayer() const;
+ NcaMetaDataHashDataInfo& GetPatchMetaDataHashDataInfo();
+ const NcaMetaDataHashDataInfo& GetPatchMetaDataHashDataInfo() const;
+ NcaFsHeader::MetaDataHashType GetPatchMetaHashType() const;
+
+ bool ExistsSparseMetaHashLayer() const;
+ NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo();
+ const NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo() const;
+ NcaFsHeader::MetaDataHashType GetSparseMetaHashType() const;
+
+private:
+ NcaFsHeader m_data;
+ s32 m_fs_index;
+};
+
+class NcaFileSystemDriver {
+ YUZU_NON_COPYABLE(NcaFileSystemDriver);
+ YUZU_NON_MOVEABLE(NcaFileSystemDriver);
+
+public:
+ struct StorageContext {
+ bool open_raw_storage;
+ VirtualFile body_substorage;
+ std::shared_ptr<SparseStorage> current_sparse_storage;
+ VirtualFile sparse_storage_meta_storage;
+ std::shared_ptr<SparseStorage> original_sparse_storage;
+ void* external_current_sparse_storage;
+ void* external_original_sparse_storage;
+ VirtualFile aes_ctr_ex_storage_meta_storage;
+ VirtualFile aes_ctr_ex_storage_data_storage;
+ std::shared_ptr<AesCtrCounterExtendedStorage> aes_ctr_ex_storage;
+ VirtualFile indirect_storage_meta_storage;
+ std::shared_ptr<IndirectStorage> indirect_storage;
+ VirtualFile fs_data_storage;
+ VirtualFile compressed_storage_meta_storage;
+ std::shared_ptr<CompressedStorage> compressed_storage;
+
+ VirtualFile patch_layer_info_storage;
+ VirtualFile sparse_layer_info_storage;
+
+ VirtualFile external_original_storage;
+ };
+
+private:
+ enum class AlignmentStorageRequirement {
+ CacheBlockSize = 0,
+ None = 1,
+ };
+
+public:
+ static Result SetupFsHeaderReader(NcaFsHeaderReader* out, const NcaReader& reader,
+ s32 fs_index);
+
+public:
+ NcaFileSystemDriver(std::shared_ptr<NcaReader> reader) : m_original_reader(), m_reader(reader) {
+ ASSERT(m_reader != nullptr);
+ }
+
+ NcaFileSystemDriver(std::shared_ptr<NcaReader> original_reader,
+ std::shared_ptr<NcaReader> reader)
+ : m_original_reader(original_reader), m_reader(reader) {
+ ASSERT(m_reader != nullptr);
+ }
+
+ Result OpenStorageWithContext(VirtualFile* out, NcaFsHeaderReader* out_header_reader,
+ s32 fs_index, StorageContext* ctx);
+
+ Result OpenStorage(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index) {
+ // Create a storage context.
+ StorageContext ctx{};
+
+ // Open the storage.
+ R_RETURN(OpenStorageWithContext(out, out_header_reader, fs_index, std::addressof(ctx)));
+ }
+
+public:
+ Result CreateStorageByRawStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader,
+ VirtualFile raw_storage, StorageContext* ctx);
+
+private:
+ Result OpenStorageImpl(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index,
+ StorageContext* ctx);
+
+ Result OpenIndirectableStorageAsOriginal(VirtualFile* out,
+ const NcaFsHeaderReader* header_reader,
+ StorageContext* ctx);
+
+ Result CreateBodySubStorage(VirtualFile* out, s64 offset, s64 size);
+
+ Result CreateAesCtrStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
+ const NcaAesCtrUpperIv& upper_iv,
+ AlignmentStorageRequirement alignment_storage_requirement);
+ Result CreateAesXtsStorage(VirtualFile* out, VirtualFile base_storage, s64 offset);
+
+ Result CreateSparseStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
+ const NcaAesCtrUpperIv& upper_iv,
+ const NcaSparseInfo& sparse_info);
+ Result CreateSparseStorageCore(std::shared_ptr<SparseStorage>* out, VirtualFile base_storage,
+ s64 base_size, VirtualFile meta_storage,
+ const NcaSparseInfo& sparse_info, bool external_info);
+ Result CreateSparseStorage(VirtualFile* out, s64* out_fs_data_offset,
+ std::shared_ptr<SparseStorage>* out_sparse_storage,
+ VirtualFile* out_meta_storage, s32 index,
+ const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info);
+
+ Result CreateSparseStorageMetaStorageWithVerification(
+ VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset,
+ const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
+ const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
+ Result CreateSparseStorageWithVerification(
+ VirtualFile* out, s64* out_fs_data_offset,
+ std::shared_ptr<SparseStorage>* out_sparse_storage, VirtualFile* out_meta_storage,
+ VirtualFile* out_verification, s32 index, const NcaAesCtrUpperIv& upper_iv,
+ const NcaSparseInfo& sparse_info, const NcaMetaDataHashDataInfo& meta_data_hash_data_info,
+ NcaFsHeader::MetaDataHashType meta_data_hash_type);
+
+ Result CreateAesCtrExStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
+ NcaFsHeader::EncryptionType encryption_type,
+ const NcaAesCtrUpperIv& upper_iv,
+ const NcaPatchInfo& patch_info);
+ Result CreateAesCtrExStorage(VirtualFile* out,
+ std::shared_ptr<AesCtrCounterExtendedStorage>* out_ext,
+ VirtualFile base_storage, VirtualFile meta_storage,
+ s64 counter_offset, const NcaAesCtrUpperIv& upper_iv,
+ const NcaPatchInfo& patch_info);
+
+ Result CreateIndirectStorageMetaStorage(VirtualFile* out, VirtualFile base_storage,
+ const NcaPatchInfo& patch_info);
+ Result CreateIndirectStorage(VirtualFile* out, std::shared_ptr<IndirectStorage>* out_ind,
+ VirtualFile base_storage, VirtualFile original_data_storage,
+ VirtualFile meta_storage, const NcaPatchInfo& patch_info);
+
+ Result CreatePatchMetaStorage(VirtualFile* out_aes_ctr_ex_meta, VirtualFile* out_indirect_meta,
+ VirtualFile* out_verification, VirtualFile base_storage,
+ s64 offset, const NcaAesCtrUpperIv& upper_iv,
+ const NcaPatchInfo& patch_info,
+ const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
+
+ Result CreateSha256Storage(VirtualFile* out, VirtualFile base_storage,
+ const NcaFsHeader::HashData::HierarchicalSha256Data& sha256_data);
+
+ Result CreateIntegrityVerificationStorage(
+ VirtualFile* out, VirtualFile base_storage,
+ const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info);
+ Result CreateIntegrityVerificationStorageForMeta(
+ VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset,
+ const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
+ Result CreateIntegrityVerificationStorageImpl(
+ VirtualFile* out, VirtualFile base_storage,
+ const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info, s64 layer_info_offset,
+ int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level);
+
+ Result CreateRegionSwitchStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader,
+ VirtualFile inside_storage, VirtualFile outside_storage);
+
+ Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp,
+ VirtualFile* out_meta, VirtualFile base_storage,
+ const NcaCompressionInfo& compression_info);
+
+public:
+ Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp,
+ VirtualFile* out_meta, VirtualFile base_storage,
+ const NcaCompressionInfo& compression_info,
+ GetDecompressorFunction get_decompressor);
+
+private:
+ std::shared_ptr<NcaReader> m_original_reader;
+ std::shared_ptr<NcaReader> m_reader;
+};
+
+} // namespace FileSys
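To make the relationship between NcaReader, NcaFsHeaderReader, and NcaFileSystemDriver concrete, here is a minimal caller-side sketch of opening filesystem section 0 of an NCA. It uses only the signatures declared in this header; the function name and the assumption that the crypto/compression configurations are passed in by the caller are illustrative, not taken from the code above.

Result OpenFirstNcaSection(VirtualFile* out, VirtualFile nca_file,
                           const NcaCryptoConfiguration& crypto_cfg,
                           const NcaCompressionConfiguration& compression_cfg) {
    // Parse and decrypt the NCA header.
    auto reader = std::make_shared<NcaReader>();
    R_TRY(reader->Initialize(std::move(nca_file), crypto_cfg, compression_cfg));

    // Build the layered storage chain for fs index 0.
    NcaFileSystemDriver driver(reader);
    NcaFsHeaderReader header_reader;
    R_RETURN(driver.OpenStorage(out, std::addressof(header_reader), 0));
}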
diff --git a/src/core/file_sys/fssystem/fssystem_nca_header.cpp b/src/core/file_sys/fssystem/fssystem_nca_header.cpp
new file mode 100644
index 000000000..bf5742d39
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_header.cpp
@@ -0,0 +1,20 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_nca_header.h"
+
+namespace FileSys {
+
+u8 NcaHeader::GetProperKeyGeneration() const {
+ return std::max(this->key_generation, this->key_generation_2);
+}
+
+bool NcaPatchInfo::HasIndirectTable() const {
+ return this->indirect_size != 0;
+}
+
+bool NcaPatchInfo::HasAesCtrExTable() const {
+ return this->aes_ctr_ex_size != 0;
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_header.h b/src/core/file_sys/fssystem/fssystem_nca_header.h
new file mode 100644
index 000000000..a02c5d881
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_header.h
@@ -0,0 +1,338 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/literals.h"
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_types.h"
+
+namespace FileSys {
+
+using namespace Common::Literals;
+
+struct Hash {
+ static constexpr std::size_t Size = 256 / 8;
+ std::array<u8, Size> value;
+};
+static_assert(sizeof(Hash) == Hash::Size);
+static_assert(std::is_trivial_v<Hash>);
+
+using NcaDigest = Hash;
+
+struct NcaHeader {
+ enum class ContentType : u8 {
+ Program = 0,
+ Meta = 1,
+ Control = 2,
+ Manual = 3,
+ Data = 4,
+ PublicData = 5,
+
+ Start = Program,
+ End = PublicData,
+ };
+
+ enum class DistributionType : u8 {
+ Download = 0,
+ GameCard = 1,
+
+ Start = Download,
+ End = GameCard,
+ };
+
+ enum class EncryptionType : u8 {
+ Auto = 0,
+ None = 1,
+ };
+
+ enum DecryptionKey {
+ DecryptionKey_AesXts = 0,
+ DecryptionKey_AesXts1 = DecryptionKey_AesXts,
+ DecryptionKey_AesXts2 = 1,
+ DecryptionKey_AesCtr = 2,
+ DecryptionKey_AesCtrEx = 3,
+ DecryptionKey_AesCtrHw = 4,
+ DecryptionKey_Count,
+ };
+
+ struct FsInfo {
+ u32 start_sector;
+ u32 end_sector;
+ u32 hash_sectors;
+ u32 reserved;
+ };
+ static_assert(sizeof(FsInfo) == 0x10);
+ static_assert(std::is_trivial_v<FsInfo>);
+
+ static constexpr u32 Magic0 = Common::MakeMagic('N', 'C', 'A', '0');
+ static constexpr u32 Magic1 = Common::MakeMagic('N', 'C', 'A', '1');
+ static constexpr u32 Magic2 = Common::MakeMagic('N', 'C', 'A', '2');
+ static constexpr u32 Magic3 = Common::MakeMagic('N', 'C', 'A', '3');
+
+ static constexpr u32 Magic = Magic3;
+
+ static constexpr std::size_t Size = 1_KiB;
+ static constexpr s32 FsCountMax = 4;
+ static constexpr std::size_t HeaderSignCount = 2;
+ static constexpr std::size_t HeaderSignSize = 0x100;
+ static constexpr std::size_t EncryptedKeyAreaSize = 0x100;
+ static constexpr std::size_t SectorSize = 0x200;
+ static constexpr std::size_t SectorShift = 9;
+ static constexpr std::size_t RightsIdSize = 0x10;
+ static constexpr std::size_t XtsBlockSize = 0x200;
+ static constexpr std::size_t CtrBlockSize = 0x10;
+
+ static_assert(SectorSize == (1 << SectorShift));
+
+ // Data members.
+ std::array<u8, HeaderSignSize> header_sign_1;
+ std::array<u8, HeaderSignSize> header_sign_2;
+ u32 magic;
+ DistributionType distribution_type;
+ ContentType content_type;
+ u8 key_generation;
+ u8 key_index;
+ u64 content_size;
+ u64 program_id;
+ u32 content_index;
+ u32 sdk_addon_version;
+ u8 key_generation_2;
+ u8 header1_signature_key_generation;
+ std::array<u8, 2> reserved_222;
+ std::array<u32, 3> reserved_224;
+ std::array<u8, RightsIdSize> rights_id;
+ std::array<FsInfo, FsCountMax> fs_info;
+ std::array<Hash, FsCountMax> fs_header_hash;
+ std::array<u8, EncryptedKeyAreaSize> encrypted_key_area;
+
+ static constexpr u64 SectorToByte(u32 sector) {
+ return static_cast<u64>(sector) << SectorShift;
+ }
+
+ static constexpr u32 ByteToSector(u64 byte) {
+ return static_cast<u32>(byte >> SectorShift);
+ }
+
+ u8 GetProperKeyGeneration() const;
+};
+static_assert(sizeof(NcaHeader) == NcaHeader::Size);
+static_assert(std::is_trivial_v<NcaHeader>);
+
+struct NcaBucketInfo {
+ static constexpr size_t HeaderSize = 0x10;
+ Int64 offset;
+ Int64 size;
+ std::array<u8, HeaderSize> header;
+};
+static_assert(std::is_trivial_v<NcaBucketInfo>);
+
+struct NcaPatchInfo {
+ static constexpr size_t Size = 0x40;
+ static constexpr size_t Offset = 0x100;
+
+ Int64 indirect_offset;
+ Int64 indirect_size;
+ std::array<u8, NcaBucketInfo::HeaderSize> indirect_header;
+ Int64 aes_ctr_ex_offset;
+ Int64 aes_ctr_ex_size;
+ std::array<u8, NcaBucketInfo::HeaderSize> aes_ctr_ex_header;
+
+ bool HasIndirectTable() const;
+ bool HasAesCtrExTable() const;
+};
+static_assert(std::is_trivial_v<NcaPatchInfo>);
+
+union NcaAesCtrUpperIv {
+ u64 value;
+ struct {
+ u32 generation;
+ u32 secure_value;
+ } part;
+};
+static_assert(std::is_trivial_v<NcaAesCtrUpperIv>);
+
+struct NcaSparseInfo {
+ NcaBucketInfo bucket;
+ Int64 physical_offset;
+ u16 generation;
+ std::array<u8, 6> reserved;
+
+ s64 GetPhysicalSize() const {
+ return this->bucket.offset + this->bucket.size;
+ }
+
+ u32 GetGeneration() const {
+ return static_cast<u32>(this->generation) << 16;
+ }
+
+ const NcaAesCtrUpperIv MakeAesCtrUpperIv(NcaAesCtrUpperIv upper_iv) const {
+ NcaAesCtrUpperIv sparse_upper_iv = upper_iv;
+ sparse_upper_iv.part.generation = this->GetGeneration();
+ return sparse_upper_iv;
+ }
+};
+static_assert(std::is_trivial_v<NcaSparseInfo>);
+
+struct NcaCompressionInfo {
+ NcaBucketInfo bucket;
+ std::array<u8, 8> reserved;
+};
+static_assert(std::is_trivial_v<NcaCompressionInfo>);
+
+struct NcaMetaDataHashDataInfo {
+ Int64 offset;
+ Int64 size;
+ Hash hash;
+};
+static_assert(std::is_trivial_v<NcaMetaDataHashDataInfo>);
+
+struct NcaFsHeader {
+ static constexpr size_t Size = 0x200;
+ static constexpr size_t HashDataOffset = 0x8;
+
+ struct Region {
+ Int64 offset;
+ Int64 size;
+ };
+ static_assert(std::is_trivial_v<Region>);
+
+ enum class FsType : u8 {
+ RomFs = 0,
+ PartitionFs = 1,
+ };
+
+ enum class EncryptionType : u8 {
+ Auto = 0,
+ None = 1,
+ AesXts = 2,
+ AesCtr = 3,
+ AesCtrEx = 4,
+ AesCtrSkipLayerHash = 5,
+ AesCtrExSkipLayerHash = 6,
+ };
+
+ enum class HashType : u8 {
+ Auto = 0,
+ None = 1,
+ HierarchicalSha256Hash = 2,
+ HierarchicalIntegrityHash = 3,
+ AutoSha3 = 4,
+ HierarchicalSha3256Hash = 5,
+ HierarchicalIntegritySha3Hash = 6,
+ };
+
+ enum class MetaDataHashType : u8 {
+ None = 0,
+ HierarchicalIntegrity = 1,
+ };
+
+ union HashData {
+ struct HierarchicalSha256Data {
+ static constexpr size_t HashLayerCountMax = 5;
+ static const size_t MasterHashOffset;
+
+ Hash fs_data_master_hash;
+ s32 hash_block_size;
+ s32 hash_layer_count;
+ std::array<Region, HashLayerCountMax> hash_layer_region;
+ } hierarchical_sha256_data;
+ static_assert(std::is_trivial_v<HierarchicalSha256Data>);
+
+ struct IntegrityMetaInfo {
+ static const size_t MasterHashOffset;
+
+ u32 magic;
+ u32 version;
+ u32 master_hash_size;
+
+ struct LevelHashInfo {
+ u32 max_layers;
+
+ struct HierarchicalIntegrityVerificationLevelInformation {
+ static constexpr size_t IntegrityMaxLayerCount = 7;
+ Int64 offset;
+ Int64 size;
+ s32 block_order;
+ std::array<u8, 4> reserved;
+ };
+ std::array<
+ HierarchicalIntegrityVerificationLevelInformation,
+ HierarchicalIntegrityVerificationLevelInformation::IntegrityMaxLayerCount - 1>
+ info;
+
+ struct SignatureSalt {
+ static constexpr size_t Size = 0x20;
+ std::array<u8, Size> value;
+ };
+ SignatureSalt seed;
+ } level_hash_info;
+
+ Hash master_hash;
+ } integrity_meta_info;
+ static_assert(std::is_trivial_v<IntegrityMetaInfo>);
+
+ std::array<u8, NcaPatchInfo::Offset - HashDataOffset> padding;
+ };
+
+ u16 version;
+ FsType fs_type;
+ HashType hash_type;
+ EncryptionType encryption_type;
+ MetaDataHashType meta_data_hash_type;
+ std::array<u8, 2> reserved;
+ HashData hash_data;
+ NcaPatchInfo patch_info;
+ NcaAesCtrUpperIv aes_ctr_upper_iv;
+ NcaSparseInfo sparse_info;
+ NcaCompressionInfo compression_info;
+ NcaMetaDataHashDataInfo meta_data_hash_data_info;
+ std::array<u8, 0x30> pad;
+
+ bool IsSkipLayerHashEncryption() const {
+ return this->encryption_type == EncryptionType::AesCtrSkipLayerHash ||
+ this->encryption_type == EncryptionType::AesCtrExSkipLayerHash;
+ }
+
+ Result GetHashTargetOffset(s64* out) const {
+ switch (this->hash_type) {
+ case HashType::HierarchicalIntegrityHash:
+ case HashType::HierarchicalIntegritySha3Hash:
+ *out = this->hash_data.integrity_meta_info.level_hash_info
+ .info[this->hash_data.integrity_meta_info.level_hash_info.max_layers - 2]
+ .offset;
+ R_SUCCEED();
+ case HashType::HierarchicalSha256Hash:
+ case HashType::HierarchicalSha3256Hash:
+ *out =
+ this->hash_data.hierarchical_sha256_data
+ .hash_layer_region[this->hash_data.hierarchical_sha256_data.hash_layer_count -
+ 1]
+ .offset;
+ R_SUCCEED();
+ default:
+ R_THROW(ResultInvalidNcaFsHeader);
+ }
+ }
+};
+static_assert(sizeof(NcaFsHeader) == NcaFsHeader::Size);
+static_assert(std::is_trivial_v<NcaFsHeader>);
+static_assert(offsetof(NcaFsHeader, patch_info) == NcaPatchInfo::Offset);
+
+inline constexpr const size_t NcaFsHeader::HashData::HierarchicalSha256Data::MasterHashOffset =
+ offsetof(NcaFsHeader, hash_data.hierarchical_sha256_data.fs_data_master_hash);
+inline constexpr const size_t NcaFsHeader::HashData::IntegrityMetaInfo::MasterHashOffset =
+ offsetof(NcaFsHeader, hash_data.integrity_meta_info.master_hash);
+
+struct NcaMetaDataHashData {
+ s64 layer_info_offset;
+ NcaFsHeader::HashData::IntegrityMetaInfo integrity_meta_info;
+};
+static_assert(sizeof(NcaMetaDataHashData) ==
+ sizeof(NcaFsHeader::HashData::IntegrityMetaInfo) + sizeof(s64));
+static_assert(std::is_trivial_v<NcaMetaDataHashData>);
+
+} // namespace FileSys
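A small sanity sketch of the sector helpers defined above: with SectorShift == 9 and SectorSize == 0x200, the two constexpr conversions are exact for whole sectors. The asserts below are illustrative only, not part of the header itself.

static_assert(FileSys::NcaHeader::SectorToByte(1) == 0x200);
static_assert(FileSys::NcaHeader::ByteToSector(5 * 0x200) == 5);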
diff --git a/src/core/file_sys/fssystem/fssystem_nca_reader.cpp b/src/core/file_sys/fssystem/fssystem_nca_reader.cpp
new file mode 100644
index 000000000..a3714ab37
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_reader.cpp
@@ -0,0 +1,531 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
+#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
+#include "core/file_sys/vfs_offset.h"
+
+namespace FileSys {
+
+namespace {
+
+constexpr inline u32 SdkAddonVersionMin = 0x000B0000;
+constexpr inline size_t Aes128KeySize = 0x10;
+constexpr const std::array<u8, Aes128KeySize> ZeroKey{};
+
+constexpr Result CheckNcaMagic(u32 magic) {
+ // Verify the magic is not a deprecated one.
+ R_UNLESS(magic != NcaHeader::Magic0, ResultUnsupportedSdkVersion);
+ R_UNLESS(magic != NcaHeader::Magic1, ResultUnsupportedSdkVersion);
+ R_UNLESS(magic != NcaHeader::Magic2, ResultUnsupportedSdkVersion);
+
+ // Verify the magic is the current one.
+ R_UNLESS(magic == NcaHeader::Magic3, ResultInvalidNcaSignature);
+
+ R_SUCCEED();
+}
+
+} // namespace
+
+NcaReader::NcaReader()
+ : m_body_storage(), m_header_storage(), m_is_software_aes_prioritized(false),
+ m_is_available_sw_key(false), m_header_encryption_type(NcaHeader::EncryptionType::Auto),
+ m_get_decompressor() {
+ std::memset(std::addressof(m_header), 0, sizeof(m_header));
+ std::memset(std::addressof(m_decryption_keys), 0, sizeof(m_decryption_keys));
+ std::memset(std::addressof(m_external_decryption_key), 0, sizeof(m_external_decryption_key));
+}
+
+NcaReader::~NcaReader() {}
+
+Result NcaReader::Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg,
+ const NcaCompressionConfiguration& compression_cfg) {
+ // Validate preconditions.
+ ASSERT(base_storage != nullptr);
+ ASSERT(m_body_storage == nullptr);
+
+ // Create the work header storage.
+ VirtualFile work_header_storage;
+
+ // We need to be able to generate keys.
+ R_UNLESS(crypto_cfg.generate_key != nullptr, ResultInvalidArgument);
+
+ // Generate keys for header.
+ using AesXtsStorageForNcaHeader = AesXtsStorage;
+
+ constexpr std::array<s32, NcaCryptoConfiguration::HeaderEncryptionKeyCount>
+ HeaderKeyTypeValues = {
+ static_cast<s32>(KeyType::NcaHeaderKey1),
+ static_cast<s32>(KeyType::NcaHeaderKey2),
+ };
+
+ std::array<std::array<u8, NcaCryptoConfiguration::Aes128KeySize>,
+ NcaCryptoConfiguration::HeaderEncryptionKeyCount>
+ header_decryption_keys;
+ for (size_t i = 0; i < NcaCryptoConfiguration::HeaderEncryptionKeyCount; i++) {
+ crypto_cfg.generate_key(header_decryption_keys[i].data(),
+ AesXtsStorageForNcaHeader::KeySize,
+ crypto_cfg.header_encrypted_encryption_keys[i].data(),
+ AesXtsStorageForNcaHeader::KeySize, HeaderKeyTypeValues[i]);
+ }
+
+ // Create the header storage.
+ std::array<u8, AesXtsStorageForNcaHeader::IvSize> header_iv = {};
+ work_header_storage = std::make_unique<AesXtsStorageForNcaHeader>(
+ base_storage, header_decryption_keys[0].data(), header_decryption_keys[1].data(),
+ AesXtsStorageForNcaHeader::KeySize, header_iv.data(), AesXtsStorageForNcaHeader::IvSize,
+ NcaHeader::XtsBlockSize);
+
+ // Check that we successfully created the storage.
+ R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA);
+
+ // Read the header.
+ work_header_storage->ReadObject(std::addressof(m_header), 0);
+
+ // Validate the magic.
+ if (const Result magic_result = CheckNcaMagic(m_header.magic); R_FAILED(magic_result)) {
+ // Try to use a plaintext header.
+ base_storage->ReadObject(std::addressof(m_header), 0);
+ R_UNLESS(R_SUCCEEDED(CheckNcaMagic(m_header.magic)), magic_result);
+
+ // Configure to use the plaintext header.
+ auto base_storage_size = base_storage->GetSize();
+ work_header_storage = std::make_shared<OffsetVfsFile>(base_storage, base_storage_size, 0);
+ R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA);
+
+ // Set encryption type as plaintext.
+ m_header_encryption_type = NcaHeader::EncryptionType::None;
+ }
+
+ // Verify the header sign1.
+ if (crypto_cfg.verify_sign1 != nullptr) {
+ const u8* sig = m_header.header_sign_1.data();
+ const size_t sig_size = NcaHeader::HeaderSignSize;
+ const u8* msg =
+ static_cast<const u8*>(static_cast<const void*>(std::addressof(m_header.magic)));
+ const size_t msg_size =
+ NcaHeader::Size - NcaHeader::HeaderSignSize * NcaHeader::HeaderSignCount;
+
+ m_is_header_sign1_signature_valid = crypto_cfg.verify_sign1(
+ sig, sig_size, msg, msg_size, m_header.header1_signature_key_generation);
+
+ if (!m_is_header_sign1_signature_valid) {
+ LOG_WARNING(Common_Filesystem, "Invalid NCA header sign1");
+ }
+ }
+
+ // Validate the sdk version.
+ R_UNLESS(m_header.sdk_addon_version >= SdkAddonVersionMin, ResultUnsupportedSdkVersion);
+
+ // Validate the key index.
+ R_UNLESS(m_header.key_index < NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount ||
+ m_header.key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey,
+ ResultInvalidNcaKeyIndex);
+
+ // Check if we have a rights id.
+ constexpr const std::array<u8, NcaHeader::RightsIdSize> ZeroRightsId{};
+ if (std::memcmp(ZeroRightsId.data(), m_header.rights_id.data(), NcaHeader::RightsIdSize) == 0) {
+ // If we don't, then we don't have an external key, so we need to generate decryption keys.
+ crypto_cfg.generate_key(
+ m_decryption_keys[NcaHeader::DecryptionKey_AesCtr].data(), Aes128KeySize,
+ m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtr * Aes128KeySize,
+ Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
+ crypto_cfg.generate_key(
+ m_decryption_keys[NcaHeader::DecryptionKey_AesXts1].data(), Aes128KeySize,
+ m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts1 * Aes128KeySize,
+ Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
+ crypto_cfg.generate_key(
+ m_decryption_keys[NcaHeader::DecryptionKey_AesXts2].data(), Aes128KeySize,
+ m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts2 * Aes128KeySize,
+ Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
+ crypto_cfg.generate_key(
+ m_decryption_keys[NcaHeader::DecryptionKey_AesCtrEx].data(), Aes128KeySize,
+ m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtrEx * Aes128KeySize,
+ Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
+
+ // Copy the hardware speed emulation key.
+ std::memcpy(m_decryption_keys[NcaHeader::DecryptionKey_AesCtrHw].data(),
+ m_header.encrypted_key_area.data() +
+ NcaHeader::DecryptionKey_AesCtrHw * Aes128KeySize,
+ Aes128KeySize);
+ }
+
+ // Clear the external decryption key.
+ std::memset(m_external_decryption_key.data(), 0, m_external_decryption_key.size());
+
+ // Set software key availability.
+ m_is_available_sw_key = crypto_cfg.is_available_sw_key;
+
+ // Set our decompressor function getter.
+ m_get_decompressor = compression_cfg.get_decompressor;
+
+ // Set our storages.
+ m_header_storage = std::move(work_header_storage);
+ m_body_storage = std::move(base_storage);
+
+ R_SUCCEED();
+}
+
+VirtualFile NcaReader::GetSharedBodyStorage() {
+ ASSERT(m_body_storage != nullptr);
+ return m_body_storage;
+}
+
+u32 NcaReader::GetMagic() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.magic;
+}
+
+NcaHeader::DistributionType NcaReader::GetDistributionType() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.distribution_type;
+}
+
+NcaHeader::ContentType NcaReader::GetContentType() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.content_type;
+}
+
+u8 NcaReader::GetHeaderSign1KeyGeneration() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.header1_signature_key_generation;
+}
+
+u8 NcaReader::GetKeyGeneration() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.GetProperKeyGeneration();
+}
+
+u8 NcaReader::GetKeyIndex() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.key_index;
+}
+
+u64 NcaReader::GetContentSize() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.content_size;
+}
+
+u64 NcaReader::GetProgramId() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.program_id;
+}
+
+u32 NcaReader::GetContentIndex() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.content_index;
+}
+
+u32 NcaReader::GetSdkAddonVersion() const {
+ ASSERT(m_body_storage != nullptr);
+ return m_header.sdk_addon_version;
+}
+
+void NcaReader::GetRightsId(u8* dst, size_t dst_size) const {
+ ASSERT(dst != nullptr);
+ ASSERT(dst_size >= NcaHeader::RightsIdSize);
+
+ std::memcpy(dst, m_header.rights_id.data(), NcaHeader::RightsIdSize);
+}
+
+bool NcaReader::HasFsInfo(s32 index) const {
+ ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+ return m_header.fs_info[index].start_sector != 0 || m_header.fs_info[index].end_sector != 0;
+}
+
+s32 NcaReader::GetFsCount() const {
+ ASSERT(m_body_storage != nullptr);
+ for (s32 i = 0; i < NcaHeader::FsCountMax; i++) {
+ if (!this->HasFsInfo(i)) {
+ return i;
+ }
+ }
+ return NcaHeader::FsCountMax;
+}
+
+const Hash& NcaReader::GetFsHeaderHash(s32 index) const {
+ ASSERT(m_body_storage != nullptr);
+ ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+ return m_header.fs_header_hash[index];
+}
+
+void NcaReader::GetFsHeaderHash(Hash* dst, s32 index) const {
+ ASSERT(m_body_storage != nullptr);
+ ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+ ASSERT(dst != nullptr);
+ std::memcpy(dst, std::addressof(m_header.fs_header_hash[index]), sizeof(*dst));
+}
+
+void NcaReader::GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const {
+ ASSERT(m_body_storage != nullptr);
+ ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+ ASSERT(dst != nullptr);
+ std::memcpy(dst, std::addressof(m_header.fs_info[index]), sizeof(*dst));
+}
+
+u64 NcaReader::GetFsOffset(s32 index) const {
+ ASSERT(m_body_storage != nullptr);
+ ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+ return NcaHeader::SectorToByte(m_header.fs_info[index].start_sector);
+}
+
+u64 NcaReader::GetFsEndOffset(s32 index) const {
+ ASSERT(m_body_storage != nullptr);
+ ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+ return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector);
+}
+
+u64 NcaReader::GetFsSize(s32 index) const {
+ ASSERT(m_body_storage != nullptr);
+ ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+ return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector -
+ m_header.fs_info[index].start_sector);
+}
+
+void NcaReader::GetEncryptedKey(void* dst, size_t size) const {
+ ASSERT(m_body_storage != nullptr);
+ ASSERT(dst != nullptr);
+ ASSERT(size >= NcaHeader::EncryptedKeyAreaSize);
+
+ std::memcpy(dst, m_header.encrypted_key_area.data(), NcaHeader::EncryptedKeyAreaSize);
+}
+
+const void* NcaReader::GetDecryptionKey(s32 index) const {
+ ASSERT(m_body_storage != nullptr);
+ ASSERT(0 <= index && index < NcaHeader::DecryptionKey_Count);
+ return m_decryption_keys[index].data();
+}
+
+bool NcaReader::HasValidInternalKey() const {
+ for (s32 i = 0; i < NcaHeader::DecryptionKey_Count; i++) {
+ if (std::memcmp(ZeroKey.data(), m_header.encrypted_key_area.data() + i * Aes128KeySize,
+ Aes128KeySize) != 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool NcaReader::HasInternalDecryptionKeyForAesHw() const {
+ return std::memcmp(ZeroKey.data(), this->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtrHw),
+ Aes128KeySize) != 0;
+}
+
+bool NcaReader::IsSoftwareAesPrioritized() const {
+ return m_is_software_aes_prioritized;
+}
+
+void NcaReader::PrioritizeSoftwareAes() {
+ m_is_software_aes_prioritized = true;
+}
+
+bool NcaReader::IsAvailableSwKey() const {
+ return m_is_available_sw_key;
+}
+
+bool NcaReader::HasExternalDecryptionKey() const {
+ return std::memcmp(ZeroKey.data(), this->GetExternalDecryptionKey(), Aes128KeySize) != 0;
+}
+
+const void* NcaReader::GetExternalDecryptionKey() const {
+ return m_external_decryption_key.data();
+}
+
+void NcaReader::SetExternalDecryptionKey(const void* src, size_t size) {
+ ASSERT(src != nullptr);
+ ASSERT(size == sizeof(m_external_decryption_key));
+
+ std::memcpy(m_external_decryption_key.data(), src, sizeof(m_external_decryption_key));
+}
+
+void NcaReader::GetRawData(void* dst, size_t dst_size) const {
+ ASSERT(m_body_storage != nullptr);
+ ASSERT(dst != nullptr);
+ ASSERT(dst_size >= sizeof(NcaHeader));
+
+ std::memcpy(dst, std::addressof(m_header), sizeof(NcaHeader));
+}
+
+GetDecompressorFunction NcaReader::GetDecompressor() const {
+ ASSERT(m_get_decompressor != nullptr);
+ return m_get_decompressor;
+}
+
+NcaHeader::EncryptionType NcaReader::GetEncryptionType() const {
+ return m_header_encryption_type;
+}
+
+Result NcaReader::ReadHeader(NcaFsHeader* dst, s32 index) const {
+ ASSERT(dst != nullptr);
+ ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+
+ const s64 offset = sizeof(NcaHeader) + sizeof(NcaFsHeader) * index;
+ m_header_storage->ReadObject(dst, offset);
+
+ R_SUCCEED();
+}
+
+bool NcaReader::GetHeaderSign1Valid() const {
+ return m_is_header_sign1_signature_valid;
+}
+
+void NcaReader::GetHeaderSign2(void* dst, size_t size) const {
+ ASSERT(dst != nullptr);
+ ASSERT(size == NcaHeader::HeaderSignSize);
+
+ std::memcpy(dst, m_header.header_sign_2.data(), size);
+}
+
+Result NcaFsHeaderReader::Initialize(const NcaReader& reader, s32 index) {
+ // Reset ourselves to uninitialized.
+ m_fs_index = -1;
+
+ // Read the header.
+ R_TRY(reader.ReadHeader(std::addressof(m_data), index));
+
+ // Set our index.
+ m_fs_index = index;
+ R_SUCCEED();
+}
+
+void NcaFsHeaderReader::GetRawData(void* dst, size_t dst_size) const {
+ ASSERT(this->IsInitialized());
+ ASSERT(dst != nullptr);
+ ASSERT(dst_size >= sizeof(NcaFsHeader));
+
+ std::memcpy(dst, std::addressof(m_data), sizeof(NcaFsHeader));
+}
+
+NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() {
+ ASSERT(this->IsInitialized());
+ return m_data.hash_data;
+}
+
+const NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() const {
+ ASSERT(this->IsInitialized());
+ return m_data.hash_data;
+}
+
+u16 NcaFsHeaderReader::GetVersion() const {
+ ASSERT(this->IsInitialized());
+ return m_data.version;
+}
+
+s32 NcaFsHeaderReader::GetFsIndex() const {
+ ASSERT(this->IsInitialized());
+ return m_fs_index;
+}
+
+NcaFsHeader::FsType NcaFsHeaderReader::GetFsType() const {
+ ASSERT(this->IsInitialized());
+ return m_data.fs_type;
+}
+
+NcaFsHeader::HashType NcaFsHeaderReader::GetHashType() const {
+ ASSERT(this->IsInitialized());
+ return m_data.hash_type;
+}
+
+NcaFsHeader::EncryptionType NcaFsHeaderReader::GetEncryptionType() const {
+ ASSERT(this->IsInitialized());
+ return m_data.encryption_type;
+}
+
+NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() {
+ ASSERT(this->IsInitialized());
+ return m_data.patch_info;
+}
+
+const NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() const {
+ ASSERT(this->IsInitialized());
+ return m_data.patch_info;
+}
+
+const NcaAesCtrUpperIv NcaFsHeaderReader::GetAesCtrUpperIv() const {
+ ASSERT(this->IsInitialized());
+ return m_data.aes_ctr_upper_iv;
+}
+
+bool NcaFsHeaderReader::IsSkipLayerHashEncryption() const {
+ ASSERT(this->IsInitialized());
+ return m_data.IsSkipLayerHashEncryption();
+}
+
+Result NcaFsHeaderReader::GetHashTargetOffset(s64* out) const {
+ ASSERT(out != nullptr);
+ ASSERT(this->IsInitialized());
+
+ R_RETURN(m_data.GetHashTargetOffset(out));
+}
+
+bool NcaFsHeaderReader::ExistsSparseLayer() const {
+ ASSERT(this->IsInitialized());
+ return m_data.sparse_info.generation != 0;
+}
+
+NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() {
+ ASSERT(this->IsInitialized());
+ return m_data.sparse_info;
+}
+
+const NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() const {
+ ASSERT(this->IsInitialized());
+ return m_data.sparse_info;
+}
+
+bool NcaFsHeaderReader::ExistsCompressionLayer() const {
+ ASSERT(this->IsInitialized());
+ return m_data.compression_info.bucket.offset != 0 && m_data.compression_info.bucket.size != 0;
+}
+
+NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() {
+ ASSERT(this->IsInitialized());
+ return m_data.compression_info;
+}
+
+const NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() const {
+ ASSERT(this->IsInitialized());
+ return m_data.compression_info;
+}
+
+bool NcaFsHeaderReader::ExistsPatchMetaHashLayer() const {
+ ASSERT(this->IsInitialized());
+ return m_data.meta_data_hash_data_info.size != 0 && this->GetPatchInfo().HasIndirectTable();
+}
+
+NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() {
+ ASSERT(this->IsInitialized());
+ return m_data.meta_data_hash_data_info;
+}
+
+const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() const {
+ ASSERT(this->IsInitialized());
+ return m_data.meta_data_hash_data_info;
+}
+
+NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetPatchMetaHashType() const {
+ ASSERT(this->IsInitialized());
+ return m_data.meta_data_hash_type;
+}
+
+bool NcaFsHeaderReader::ExistsSparseMetaHashLayer() const {
+ ASSERT(this->IsInitialized());
+ return m_data.meta_data_hash_data_info.size != 0 && this->ExistsSparseLayer();
+}
+
+NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() {
+ ASSERT(this->IsInitialized());
+ return m_data.meta_data_hash_data_info;
+}
+
+const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() const {
+ ASSERT(this->IsInitialized());
+ return m_data.meta_data_hash_data_info;
+}
+
+NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetSparseMetaHashType() const {
+ ASSERT(this->IsInitialized());
+ return m_data.meta_data_hash_type;
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp b/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp
new file mode 100644
index 000000000..bbfaab255
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp
@@ -0,0 +1,61 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+
+namespace FileSys {
+
+namespace {
+
+constexpr size_t HeapBlockSize = BufferPoolAlignment;
+static_assert(HeapBlockSize == 4_KiB);
+
+// A heap block is 4KiB, and an order is a power of two.
+// These orders give maximum allocations of 512KiB (order 7) and 4MiB (order 10).
+constexpr s32 HeapOrderMax = 7;
+constexpr s32 HeapOrderMaxForLarge = HeapOrderMax + 3;
+
+constexpr size_t HeapAllocatableSizeMax = HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMax);
+constexpr size_t HeapAllocatableSizeMaxForLarge =
+ HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMaxForLarge);
+
+} // namespace
+
+size_t PooledBuffer::GetAllocatableSizeMaxCore(bool large) {
+ return large ? HeapAllocatableSizeMaxForLarge : HeapAllocatableSizeMax;
+}
+
+void PooledBuffer::AllocateCore(size_t ideal_size, size_t required_size, bool large) {
+ // Ensure preconditions.
+ ASSERT(m_buffer == nullptr);
+
+ // Check that we can allocate this size.
+ ASSERT(required_size <= GetAllocatableSizeMaxCore(large));
+
+ const size_t target_size =
+ std::min(std::max(ideal_size, required_size), GetAllocatableSizeMaxCore(large));
+
+    // Simplified implementation: allocate directly from the heap rather than from a shared buffer pool.
+ if (target_size > 0) {
+ m_buffer =
+ reinterpret_cast<char*>(::operator new(target_size, std::align_val_t{HeapBlockSize}));
+ m_size = target_size;
+
+ // Ensure postconditions.
+ ASSERT(m_buffer != nullptr);
+ }
+}
+
+void PooledBuffer::Shrink(size_t ideal_size) {
+ ASSERT(ideal_size <= GetAllocatableSizeMaxCore(true));
+
+ // Shrinking to zero means that we have no buffer.
+ if (ideal_size == 0) {
+ ::operator delete(m_buffer, std::align_val_t{HeapBlockSize});
+ m_buffer = nullptr;
+ m_size = ideal_size;
+ }
+}
+
+} // namespace FileSys
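
For reference, the ceilings implied by the constants above work out to 512KiB for normal allocations and 4MiB for "particularly large" ones. A minimal illustrative check (editor's sketch, not part of the change), assuming the 4KiB block size asserted in the hunk:

    // Illustrative only: the maxima implied by HeapBlockSize == 4_KiB.
    static_assert((size_t{4096} << 7) == 512 * 1024);        // HeapAllocatableSizeMax
    static_assert((size_t{4096} << 10) == 4 * 1024 * 1024);  // HeapAllocatableSizeMaxForLarge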
diff --git a/src/core/file_sys/fssystem/fssystem_pooled_buffer.h b/src/core/file_sys/fssystem/fssystem_pooled_buffer.h
new file mode 100644
index 000000000..9a6adbcb5
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_pooled_buffer.h
@@ -0,0 +1,95 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/literals.h"
+#include "core/hle/result.h"
+
+namespace FileSys {
+
+using namespace Common::Literals;
+
+constexpr inline size_t BufferPoolAlignment = 4_KiB;
+constexpr inline size_t BufferPoolWorkSize = 320;
+
+class PooledBuffer {
+ YUZU_NON_COPYABLE(PooledBuffer);
+
+public:
+ // Constructor/Destructor.
+ constexpr PooledBuffer() : m_buffer(), m_size() {}
+
+ PooledBuffer(size_t ideal_size, size_t required_size) : m_buffer(), m_size() {
+ this->Allocate(ideal_size, required_size);
+ }
+
+ ~PooledBuffer() {
+ this->Deallocate();
+ }
+
+ // Move and assignment.
+ explicit PooledBuffer(PooledBuffer&& rhs) : m_buffer(rhs.m_buffer), m_size(rhs.m_size) {
+ rhs.m_buffer = nullptr;
+ rhs.m_size = 0;
+ }
+
+ PooledBuffer& operator=(PooledBuffer&& rhs) {
+ PooledBuffer(std::move(rhs)).Swap(*this);
+ return *this;
+ }
+
+ // Allocation API.
+ void Allocate(size_t ideal_size, size_t required_size) {
+ return this->AllocateCore(ideal_size, required_size, false);
+ }
+
+ void AllocateParticularlyLarge(size_t ideal_size, size_t required_size) {
+ return this->AllocateCore(ideal_size, required_size, true);
+ }
+
+ void Shrink(size_t ideal_size);
+
+ void Deallocate() {
+ // Shrink the buffer to empty.
+ this->Shrink(0);
+ ASSERT(m_buffer == nullptr);
+ }
+
+ char* GetBuffer() const {
+ ASSERT(m_buffer != nullptr);
+ return m_buffer;
+ }
+
+ size_t GetSize() const {
+ ASSERT(m_buffer != nullptr);
+ return m_size;
+ }
+
+public:
+ static size_t GetAllocatableSizeMax() {
+ return GetAllocatableSizeMaxCore(false);
+ }
+ static size_t GetAllocatableParticularlyLargeSizeMax() {
+ return GetAllocatableSizeMaxCore(true);
+ }
+
+private:
+ static size_t GetAllocatableSizeMaxCore(bool large);
+
+private:
+ void Swap(PooledBuffer& rhs) {
+ std::swap(m_buffer, rhs.m_buffer);
+ std::swap(m_size, rhs.m_size);
+ }
+
+ void AllocateCore(size_t ideal_size, size_t required_size, bool large);
+
+private:
+ char* m_buffer;
+ size_t m_size;
+};
+
+} // namespace FileSys
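
A short usage sketch of the interface above (editor's illustration; the sizes are hypothetical and the header is assumed to be included):

    using namespace Common::Literals;

    // Request 64_KiB if possible, but accept anything that fits 16_KiB (hypothetical sizes).
    FileSys::PooledBuffer pooled(64_KiB, 16_KiB);
    char* data = pooled.GetBuffer();   // asserts that an allocation is held
    size_t usable = pooled.GetSize();  // clamped to GetAllocatableSizeMax()
    // ... fill and consume the buffer ...
    pooled.Deallocate();               // same as Shrink(0); also performed by the destructor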
diff --git a/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp b/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp
new file mode 100644
index 000000000..8574a11dd
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp
@@ -0,0 +1,39 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_sparse_storage.h"
+
+namespace FileSys {
+
+size_t SparseStorage::Read(u8* buffer, size_t size, size_t offset) const {
+ // Validate preconditions.
+ ASSERT(this->IsInitialized());
+ ASSERT(buffer != nullptr);
+
+ // Allow zero size.
+ if (size == 0) {
+ return size;
+ }
+
+ SparseStorage* self = const_cast<SparseStorage*>(this);
+
+ if (self->GetEntryTable().IsEmpty()) {
+ BucketTree::Offsets table_offsets;
+ ASSERT(R_SUCCEEDED(self->GetEntryTable().GetOffsets(std::addressof(table_offsets))));
+ ASSERT(table_offsets.IsInclude(offset, size));
+
+ std::memset(buffer, 0, size);
+ } else {
+ self->OperatePerEntry<false, true>(
+ offset, size,
+ [=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
+ storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset),
+ static_cast<size_t>(cur_size), data_offset);
+ R_SUCCEED();
+ });
+ }
+
+ return size;
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_sparse_storage.h b/src/core/file_sys/fssystem/fssystem_sparse_storage.h
new file mode 100644
index 000000000..6c196ec61
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_sparse_storage.h
@@ -0,0 +1,72 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
+
+namespace FileSys {
+
+class SparseStorage : public IndirectStorage {
+ YUZU_NON_COPYABLE(SparseStorage);
+ YUZU_NON_MOVEABLE(SparseStorage);
+
+private:
+ class ZeroStorage : public IReadOnlyStorage {
+ public:
+ ZeroStorage() {}
+ virtual ~ZeroStorage() {}
+
+ virtual size_t GetSize() const override {
+ return std::numeric_limits<size_t>::max();
+ }
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+ ASSERT(buffer != nullptr || size == 0);
+
+ if (size > 0) {
+ std::memset(buffer, 0, size);
+ }
+
+ return size;
+ }
+ };
+
+public:
+ SparseStorage() : IndirectStorage(), m_zero_storage(std::make_shared<ZeroStorage>()) {}
+ virtual ~SparseStorage() {}
+
+ using IndirectStorage::Initialize;
+
+ void Initialize(s64 end_offset) {
+ this->GetEntryTable().Initialize(NodeSize, end_offset);
+ this->SetZeroStorage();
+ }
+
+ void SetDataStorage(VirtualFile storage) {
+ ASSERT(this->IsInitialized());
+
+ this->SetStorage(0, storage);
+ this->SetZeroStorage();
+ }
+
+ template <typename T>
+ void SetDataStorage(T storage, s64 offset, s64 size) {
+ ASSERT(this->IsInitialized());
+
+ this->SetStorage(0, storage, offset, size);
+ this->SetZeroStorage();
+ }
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+
+private:
+ void SetZeroStorage() {
+ return this->SetStorage(1, m_zero_storage, 0, std::numeric_limits<s64>::max());
+ }
+
+private:
+ VirtualFile m_zero_storage;
+};
+
+} // namespace FileSys
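
Reads that fall outside any bucket-tree entry resolve to the internal ZeroStorage, so sparse holes come back zero-filled. A rough setup sketch using only the members shown above (editor's illustration; real construction goes through NcaFileSystemDriver, and backing_file/total_size are placeholders):

    FileSys::SparseStorage sparse;
    sparse.Initialize(total_size);        // entry table spans [0, total_size)
    sparse.SetDataStorage(backing_file);  // entry index 0 = real data, index 1 = zeros

    std::vector<u8> buffer(0x1000);
    sparse.Read(buffer.data(), buffer.size(), 0);  // unmapped ranges read back as zeros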
diff --git a/src/core/file_sys/fssystem/fssystem_switch_storage.h b/src/core/file_sys/fssystem/fssystem_switch_storage.h
new file mode 100644
index 000000000..2b43927cb
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_switch_storage.h
@@ -0,0 +1,80 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fs_i_storage.h"
+
+namespace FileSys {
+
+class RegionSwitchStorage : public IReadOnlyStorage {
+ YUZU_NON_COPYABLE(RegionSwitchStorage);
+ YUZU_NON_MOVEABLE(RegionSwitchStorage);
+
+public:
+ struct Region {
+ s64 offset;
+ s64 size;
+ };
+
+public:
+ RegionSwitchStorage(VirtualFile&& i, VirtualFile&& o, Region r)
+ : m_inside_region_storage(std::move(i)), m_outside_region_storage(std::move(o)),
+ m_region(r) {}
+
+ virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+ // Process until we're done.
+ size_t processed = 0;
+ while (processed < size) {
+ // Process on the appropriate storage.
+ s64 cur_size = 0;
+ if (this->CheckRegions(std::addressof(cur_size), offset + processed,
+ size - processed)) {
+ m_inside_region_storage->Read(buffer + processed, cur_size, offset + processed);
+ } else {
+ m_outside_region_storage->Read(buffer + processed, cur_size, offset + processed);
+ }
+
+ // Advance.
+ processed += cur_size;
+ }
+
+ return size;
+ }
+
+ virtual size_t GetSize() const override {
+ return m_inside_region_storage->GetSize();
+ }
+
+private:
+ bool CheckRegions(s64* out_current_size, s64 offset, s64 size) const {
+ // Check if our region contains the access.
+ if (m_region.offset <= offset) {
+ if (offset < m_region.offset + m_region.size) {
+ if (m_region.offset + m_region.size <= offset + size) {
+ *out_current_size = m_region.offset + m_region.size - offset;
+ } else {
+ *out_current_size = size;
+ }
+ return true;
+ } else {
+ *out_current_size = size;
+ return false;
+ }
+ } else {
+ if (m_region.offset <= offset + size) {
+ *out_current_size = m_region.offset - offset;
+ } else {
+ *out_current_size = size;
+ }
+ return false;
+ }
+ }
+
+private:
+ VirtualFile m_inside_region_storage;
+ VirtualFile m_outside_region_storage;
+ Region m_region;
+};
+
+} // namespace FileSys
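
A worked example of how CheckRegions splits an access (illustrative values): with Region{offset = 0x1000, size = 0x1000}, a 0x3000-byte Read at offset 0 is served in three passes of the loop above:

    // [0x0000, 0x1000): CheckRegions -> false, cur_size = 0x1000 -> outside-region storage
    // [0x1000, 0x2000): CheckRegions -> true,  cur_size = 0x1000 -> inside-region storage
    // [0x2000, 0x3000): CheckRegions -> false, cur_size = 0x1000 -> outside-region storage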
diff --git a/src/core/file_sys/fssystem/fssystem_utility.cpp b/src/core/file_sys/fssystem/fssystem_utility.cpp
new file mode 100644
index 000000000..ceabb8ff1
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_utility.cpp
@@ -0,0 +1,27 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_utility.h"
+
+namespace FileSys {
+
+void AddCounter(void* counter_, size_t counter_size, u64 value) {
+ u8* counter = static_cast<u8*>(counter_);
+ u64 remaining = value;
+ u8 carry = 0;
+
+ for (size_t i = 0; i < counter_size; i++) {
+ auto sum = counter[counter_size - 1 - i] + (remaining & 0xFF) + carry;
+ carry = static_cast<u8>(sum >> (sizeof(u8) * 8));
+ auto sum8 = static_cast<u8>(sum & 0xFF);
+
+ counter[counter_size - 1 - i] = sum8;
+
+ remaining >>= (sizeof(u8) * 8);
+ if (carry == 0 && remaining == 0) {
+ break;
+ }
+ }
+}
+
+} // namespace FileSys
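
AddCounter treats the buffer as a big-endian integer and propagates carries from the last byte upward; a small illustration (editor's sketch):

    std::array<u8, 4> ctr{0x00, 0x00, 0x00, 0xFF};
    FileSys::AddCounter(ctr.data(), ctr.size(), 1);
    // The carry ripples into the next byte: ctr is now {0x00, 0x00, 0x01, 0x00}.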
diff --git a/src/core/file_sys/fssystem/fssystem_utility.h b/src/core/file_sys/fssystem/fssystem_utility.h
new file mode 100644
index 000000000..284b8b811
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_utility.h
@@ -0,0 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+
+namespace FileSys {
+
+void AddCounter(void* counter, size_t counter_size, u64 value);
+
+}
diff --git a/src/core/file_sys/nca_patch.cpp b/src/core/file_sys/nca_patch.cpp
deleted file mode 100644
index 2735d053b..000000000
--- a/src/core/file_sys/nca_patch.cpp
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <algorithm>
-#include <array>
-#include <cstddef>
-#include <cstring>
-
-#include "common/assert.h"
-#include "core/crypto/aes_util.h"
-#include "core/file_sys/nca_patch.h"
-
-namespace FileSys {
-namespace {
-template <bool Subsection, typename BlockType, typename BucketType>
-std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockType& block,
- const BucketType& buckets) {
- if constexpr (Subsection) {
- const auto& last_bucket = buckets[block.number_buckets - 1];
- if (offset >= last_bucket.entries[last_bucket.number_entries].address_patch) {
- return {block.number_buckets - 1, last_bucket.number_entries};
- }
- } else {
- ASSERT_MSG(offset <= block.size, "Offset is out of bounds in BKTR relocation block.");
- }
-
- std::size_t bucket_id = std::count_if(
- block.base_offsets.begin() + 1, block.base_offsets.begin() + block.number_buckets,
- [&offset](u64 base_offset) { return base_offset <= offset; });
-
- const auto& bucket = buckets[bucket_id];
-
- if (bucket.number_entries == 1) {
- return {bucket_id, 0};
- }
-
- std::size_t low = 0;
- std::size_t mid = 0;
- std::size_t high = bucket.number_entries - 1;
- while (low <= high) {
- mid = (low + high) / 2;
- if (bucket.entries[mid].address_patch > offset) {
- high = mid - 1;
- } else {
- if (mid == bucket.number_entries - 1 ||
- bucket.entries[mid + 1].address_patch > offset) {
- return {bucket_id, mid};
- }
-
- low = mid + 1;
- }
- }
- ASSERT_MSG(false, "Offset could not be found in BKTR block.");
- return {0, 0};
-}
-} // Anonymous namespace
-
-BKTR::BKTR(VirtualFile base_romfs_, VirtualFile bktr_romfs_, RelocationBlock relocation_,
- std::vector<RelocationBucket> relocation_buckets_, SubsectionBlock subsection_,
- std::vector<SubsectionBucket> subsection_buckets_, bool is_encrypted_,
- Core::Crypto::Key128 key_, u64 base_offset_, u64 ivfc_offset_,
- std::array<u8, 8> section_ctr_)
- : relocation(relocation_), relocation_buckets(std::move(relocation_buckets_)),
- subsection(subsection_), subsection_buckets(std::move(subsection_buckets_)),
- base_romfs(std::move(base_romfs_)), bktr_romfs(std::move(bktr_romfs_)),
- encrypted(is_encrypted_), key(key_), base_offset(base_offset_), ivfc_offset(ivfc_offset_),
- section_ctr(section_ctr_) {
- for (std::size_t i = 0; i < relocation.number_buckets - 1; ++i) {
- relocation_buckets[i].entries.push_back({relocation.base_offsets[i + 1], 0, 0});
- }
-
- for (std::size_t i = 0; i < subsection.number_buckets - 1; ++i) {
- subsection_buckets[i].entries.push_back({subsection_buckets[i + 1].entries[0].address_patch,
- {0},
- subsection_buckets[i + 1].entries[0].ctr});
- }
-
- relocation_buckets.back().entries.push_back({relocation.size, 0, 0});
-}
-
-BKTR::~BKTR() = default;
-
-std::size_t BKTR::Read(u8* data, std::size_t length, std::size_t offset) const {
- // Read out of bounds.
- if (offset >= relocation.size) {
- return 0;
- }
-
- const auto relocation_entry = GetRelocationEntry(offset);
- const auto section_offset =
- offset - relocation_entry.address_patch + relocation_entry.address_source;
- const auto bktr_read = relocation_entry.from_patch;
-
- const auto next_relocation = GetNextRelocationEntry(offset);
-
- if (offset + length > next_relocation.address_patch) {
- const u64 partition = next_relocation.address_patch - offset;
- return Read(data, partition, offset) +
- Read(data + partition, length - partition, offset + partition);
- }
-
- if (!bktr_read) {
- ASSERT_MSG(section_offset >= ivfc_offset, "Offset calculation negative.");
- return base_romfs->Read(data, length, section_offset - ivfc_offset);
- }
-
- if (!encrypted) {
- return bktr_romfs->Read(data, length, section_offset);
- }
-
- const auto subsection_entry = GetSubsectionEntry(section_offset);
- Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(key, Core::Crypto::Mode::CTR);
-
- // Calculate AES IV
- std::array<u8, 16> iv{};
- auto subsection_ctr = subsection_entry.ctr;
- auto offset_iv = section_offset + base_offset;
- for (std::size_t i = 0; i < section_ctr.size(); ++i) {
- iv[i] = section_ctr[0x8 - i - 1];
- }
- offset_iv >>= 4;
- for (std::size_t i = 0; i < sizeof(u64); ++i) {
- iv[0xF - i] = static_cast<u8>(offset_iv & 0xFF);
- offset_iv >>= 8;
- }
- for (std::size_t i = 0; i < sizeof(u32); ++i) {
- iv[0x7 - i] = static_cast<u8>(subsection_ctr & 0xFF);
- subsection_ctr >>= 8;
- }
- cipher.SetIV(iv);
-
- const auto next_subsection = GetNextSubsectionEntry(section_offset);
-
- if (section_offset + length > next_subsection.address_patch) {
- const u64 partition = next_subsection.address_patch - section_offset;
- return Read(data, partition, offset) +
- Read(data + partition, length - partition, offset + partition);
- }
-
- const auto block_offset = section_offset & 0xF;
- if (block_offset != 0) {
- auto block = bktr_romfs->ReadBytes(0x10, section_offset & ~0xF);
- cipher.Transcode(block.data(), block.size(), block.data(), Core::Crypto::Op::Decrypt);
- if (length + block_offset < 0x10) {
- std::memcpy(data, block.data() + block_offset, std::min(length, block.size()));
- return std::min(length, block.size());
- }
-
- const auto read = 0x10 - block_offset;
- std::memcpy(data, block.data() + block_offset, read);
- return read + Read(data + read, length - read, offset + read);
- }
-
- const auto raw_read = bktr_romfs->Read(data, length, section_offset);
- cipher.Transcode(data, raw_read, data, Core::Crypto::Op::Decrypt);
- return raw_read;
-}
-
-RelocationEntry BKTR::GetRelocationEntry(u64 offset) const {
- const auto res = SearchBucketEntry<false>(offset, relocation, relocation_buckets);
- return relocation_buckets[res.first].entries[res.second];
-}
-
-RelocationEntry BKTR::GetNextRelocationEntry(u64 offset) const {
- const auto res = SearchBucketEntry<false>(offset, relocation, relocation_buckets);
- const auto bucket = relocation_buckets[res.first];
- if (res.second + 1 < bucket.entries.size())
- return bucket.entries[res.second + 1];
- return relocation_buckets[res.first + 1].entries[0];
-}
-
-SubsectionEntry BKTR::GetSubsectionEntry(u64 offset) const {
- const auto res = SearchBucketEntry<true>(offset, subsection, subsection_buckets);
- return subsection_buckets[res.first].entries[res.second];
-}
-
-SubsectionEntry BKTR::GetNextSubsectionEntry(u64 offset) const {
- const auto res = SearchBucketEntry<true>(offset, subsection, subsection_buckets);
- const auto bucket = subsection_buckets[res.first];
- if (res.second + 1 < bucket.entries.size())
- return bucket.entries[res.second + 1];
- return subsection_buckets[res.first + 1].entries[0];
-}
-
-std::string BKTR::GetName() const {
- return base_romfs->GetName();
-}
-
-std::size_t BKTR::GetSize() const {
- return relocation.size;
-}
-
-bool BKTR::Resize(std::size_t new_size) {
- return false;
-}
-
-VirtualDir BKTR::GetContainingDirectory() const {
- return base_romfs->GetContainingDirectory();
-}
-
-bool BKTR::IsWritable() const {
- return false;
-}
-
-bool BKTR::IsReadable() const {
- return true;
-}
-
-std::size_t BKTR::Write(const u8* data, std::size_t length, std::size_t offset) {
- return 0;
-}
-
-bool BKTR::Rename(std::string_view name) {
- return base_romfs->Rename(name);
-}
-
-} // namespace FileSys
diff --git a/src/core/file_sys/nca_patch.h b/src/core/file_sys/nca_patch.h
deleted file mode 100644
index 595e3ef09..000000000
--- a/src/core/file_sys/nca_patch.h
+++ /dev/null
@@ -1,145 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <array>
-#include <memory>
-#include <vector>
-
-#include "common/common_funcs.h"
-#include "common/common_types.h"
-#include "common/swap.h"
-#include "core/crypto/key_manager.h"
-
-namespace FileSys {
-
-#pragma pack(push, 1)
-struct RelocationEntry {
- u64_le address_patch;
- u64_le address_source;
- u32 from_patch;
-};
-#pragma pack(pop)
-static_assert(sizeof(RelocationEntry) == 0x14, "RelocationEntry has incorrect size.");
-
-struct RelocationBucketRaw {
- INSERT_PADDING_BYTES(4);
- u32_le number_entries;
- u64_le end_offset;
- std::array<RelocationEntry, 0x332> relocation_entries;
- INSERT_PADDING_BYTES(8);
-};
-static_assert(sizeof(RelocationBucketRaw) == 0x4000, "RelocationBucketRaw has incorrect size.");
-
-// Vector version of RelocationBucketRaw
-struct RelocationBucket {
- u32 number_entries;
- u64 end_offset;
- std::vector<RelocationEntry> entries;
-};
-
-struct RelocationBlock {
- INSERT_PADDING_BYTES(4);
- u32_le number_buckets;
- u64_le size;
- std::array<u64, 0x7FE> base_offsets;
-};
-static_assert(sizeof(RelocationBlock) == 0x4000, "RelocationBlock has incorrect size.");
-
-struct SubsectionEntry {
- u64_le address_patch;
- INSERT_PADDING_BYTES(0x4);
- u32_le ctr;
-};
-static_assert(sizeof(SubsectionEntry) == 0x10, "SubsectionEntry has incorrect size.");
-
-struct SubsectionBucketRaw {
- INSERT_PADDING_BYTES(4);
- u32_le number_entries;
- u64_le end_offset;
- std::array<SubsectionEntry, 0x3FF> subsection_entries;
-};
-static_assert(sizeof(SubsectionBucketRaw) == 0x4000, "SubsectionBucketRaw has incorrect size.");
-
-// Vector version of SubsectionBucketRaw
-struct SubsectionBucket {
- u32 number_entries;
- u64 end_offset;
- std::vector<SubsectionEntry> entries;
-};
-
-struct SubsectionBlock {
- INSERT_PADDING_BYTES(4);
- u32_le number_buckets;
- u64_le size;
- std::array<u64, 0x7FE> base_offsets;
-};
-static_assert(sizeof(SubsectionBlock) == 0x4000, "SubsectionBlock has incorrect size.");
-
-inline RelocationBucket ConvertRelocationBucketRaw(RelocationBucketRaw raw) {
- return {raw.number_entries,
- raw.end_offset,
- {raw.relocation_entries.begin(), raw.relocation_entries.begin() + raw.number_entries}};
-}
-
-inline SubsectionBucket ConvertSubsectionBucketRaw(SubsectionBucketRaw raw) {
- return {raw.number_entries,
- raw.end_offset,
- {raw.subsection_entries.begin(), raw.subsection_entries.begin() + raw.number_entries}};
-}
-
-class BKTR : public VfsFile {
-public:
- BKTR(VirtualFile base_romfs, VirtualFile bktr_romfs, RelocationBlock relocation,
- std::vector<RelocationBucket> relocation_buckets, SubsectionBlock subsection,
- std::vector<SubsectionBucket> subsection_buckets, bool is_encrypted,
- Core::Crypto::Key128 key, u64 base_offset, u64 ivfc_offset, std::array<u8, 8> section_ctr);
- ~BKTR() override;
-
- std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override;
-
- std::string GetName() const override;
-
- std::size_t GetSize() const override;
-
- bool Resize(std::size_t new_size) override;
-
- VirtualDir GetContainingDirectory() const override;
-
- bool IsWritable() const override;
-
- bool IsReadable() const override;
-
- std::size_t Write(const u8* data, std::size_t length, std::size_t offset) override;
-
- bool Rename(std::string_view name) override;
-
-private:
- RelocationEntry GetRelocationEntry(u64 offset) const;
- RelocationEntry GetNextRelocationEntry(u64 offset) const;
-
- SubsectionEntry GetSubsectionEntry(u64 offset) const;
- SubsectionEntry GetNextSubsectionEntry(u64 offset) const;
-
- RelocationBlock relocation;
- std::vector<RelocationBucket> relocation_buckets;
- SubsectionBlock subsection;
- std::vector<SubsectionBucket> subsection_buckets;
-
- // Should be the raw base romfs, decrypted.
- VirtualFile base_romfs;
- // Should be the raw BKTR romfs, (located at media_offset with size media_size).
- VirtualFile bktr_romfs;
-
- bool encrypted;
- Core::Crypto::Key128 key;
-
- // Base offset into NCA, used for IV calculation.
- u64 base_offset;
- // Distance between IVFC start and RomFS start, used for base reads
- u64 ivfc_offset;
- std::array<u8, 8> section_ctr;
-};
-
-} // namespace FileSys
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp
index 2ba1b34a4..a4baddb15 100644
--- a/src/core/file_sys/patch_manager.cpp
+++ b/src/core/file_sys/patch_manager.cpp
@@ -141,8 +141,7 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {
const auto update_tid = GetUpdateTitleID(title_id);
const auto update = content_provider.GetEntry(update_tid, ContentRecordType::Program);
- if (!update_disabled && update != nullptr && update->GetExeFS() != nullptr &&
- update->GetStatus() == Loader::ResultStatus::ErrorMissingBKTRBaseRomFS) {
+ if (!update_disabled && update != nullptr && update->GetExeFS() != nullptr) {
LOG_INFO(Loader, " ExeFS: Update ({}) applied successfully",
FormatTitleVersion(content_provider.GetEntryVersion(update_tid).value_or(0)));
exefs = update->GetExeFS();
@@ -353,16 +352,12 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
const Service::FileSystem::FileSystemController& fs_controller) {
const auto load_dir = fs_controller.GetModificationLoadRoot(title_id);
const auto sdmc_load_dir = fs_controller.GetSDMCModificationLoadRoot(title_id);
- if ((type != ContentRecordType::Program && type != ContentRecordType::Data) ||
+ if ((type != ContentRecordType::Program && type != ContentRecordType::Data &&
+ type != ContentRecordType::HtmlDocument) ||
(load_dir == nullptr && sdmc_load_dir == nullptr)) {
return;
}
- auto extracted = ExtractRomFS(romfs);
- if (extracted == nullptr) {
- return;
- }
-
const auto& disabled = Settings::values.disabled_addons[title_id];
std::vector<VirtualDir> patch_dirs = load_dir->GetSubdirectories();
if (std::find(disabled.cbegin(), disabled.cend(), "SDMC") == disabled.cend()) {
@@ -387,6 +382,12 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
auto ext_dir = FindSubdirectoryCaseless(subdir, "romfs_ext");
if (ext_dir != nullptr)
layers_ext.push_back(std::make_shared<CachedVfsDirectory>(ext_dir));
+
+ if (type == ContentRecordType::HtmlDocument) {
+ auto manual_dir = FindSubdirectoryCaseless(subdir, "manual_html");
+ if (manual_dir != nullptr)
+ layers.push_back(std::make_shared<CachedVfsDirectory>(manual_dir));
+ }
}
// When there are no layers to apply, return early as there is no need to rebuild the RomFS
@@ -394,6 +395,11 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
return;
}
+ auto extracted = ExtractRomFS(romfs);
+ if (extracted == nullptr) {
+ return;
+ }
+
layers.push_back(std::move(extracted));
auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
@@ -412,39 +418,43 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
romfs = std::move(packed);
}
-VirtualFile PatchManager::PatchRomFS(VirtualFile romfs, u64 ivfc_offset, ContentRecordType type,
- VirtualFile update_raw, bool apply_layeredfs) const {
+VirtualFile PatchManager::PatchRomFS(const NCA* base_nca, VirtualFile base_romfs,
+ ContentRecordType type, VirtualFile packed_update_raw,
+ bool apply_layeredfs) const {
const auto log_string = fmt::format("Patching RomFS for title_id={:016X}, type={:02X}",
title_id, static_cast<u8>(type));
-
if (type == ContentRecordType::Program || type == ContentRecordType::Data) {
LOG_INFO(Loader, "{}", log_string);
} else {
LOG_DEBUG(Loader, "{}", log_string);
}
- if (romfs == nullptr) {
- return romfs;
+ if (base_romfs == nullptr) {
+ return base_romfs;
}
+ auto romfs = base_romfs;
+
// Game Updates
const auto update_tid = GetUpdateTitleID(title_id);
- const auto update = content_provider.GetEntryRaw(update_tid, type);
+ const auto update_raw = content_provider.GetEntryRaw(update_tid, type);
const auto& disabled = Settings::values.disabled_addons[title_id];
const auto update_disabled =
std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend();
- if (!update_disabled && update != nullptr) {
- const auto new_nca = std::make_shared<NCA>(update, romfs, ivfc_offset);
+ if (!update_disabled && update_raw != nullptr && base_nca != nullptr) {
+ const auto new_nca = std::make_shared<NCA>(update_raw, base_nca);
if (new_nca->GetStatus() == Loader::ResultStatus::Success &&
new_nca->GetRomFS() != nullptr) {
LOG_INFO(Loader, " RomFS: Update ({}) applied successfully",
FormatTitleVersion(content_provider.GetEntryVersion(update_tid).value_or(0)));
romfs = new_nca->GetRomFS();
+ const auto version =
+ FormatTitleVersion(content_provider.GetEntryVersion(update_tid).value_or(0));
}
- } else if (!update_disabled && update_raw != nullptr) {
- const auto new_nca = std::make_shared<NCA>(update_raw, romfs, ivfc_offset);
+ } else if (!update_disabled && packed_update_raw != nullptr && base_nca != nullptr) {
+ const auto new_nca = std::make_shared<NCA>(packed_update_raw, base_nca);
if (new_nca->GetStatus() == Loader::ResultStatus::Success &&
new_nca->GetRomFS() != nullptr) {
LOG_INFO(Loader, " RomFS: Update (PACKED) applied successfully");
@@ -608,7 +618,7 @@ PatchManager::Metadata PatchManager::ParseControlNCA(const NCA& nca) const {
return {};
}
- const auto romfs = PatchRomFS(base_romfs, nca.GetBaseIVFCOffset(), ContentRecordType::Control);
+ const auto romfs = PatchRomFS(&nca, base_romfs, ContentRecordType::Control);
if (romfs == nullptr) {
return {};
}
diff --git a/src/core/file_sys/patch_manager.h b/src/core/file_sys/patch_manager.h
index 69d15e2f8..adcde7b7d 100644
--- a/src/core/file_sys/patch_manager.h
+++ b/src/core/file_sys/patch_manager.h
@@ -61,9 +61,9 @@ public:
// Currently tracked RomFS patches:
// - Game Updates
// - LayeredFS
- [[nodiscard]] VirtualFile PatchRomFS(VirtualFile base, u64 ivfc_offset,
+ [[nodiscard]] VirtualFile PatchRomFS(const NCA* base_nca, VirtualFile base_romfs,
ContentRecordType type = ContentRecordType::Program,
- VirtualFile update_raw = nullptr,
+ VirtualFile packed_update_raw = nullptr,
bool apply_layeredfs = true) const;
// Returns a vector of pairs between patch names and patch versions.
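
With the new signature, callers pass the base NCA pointer instead of an IVFC offset; a representative call, mirroring the romfs_factory.cpp change later in this patch (editor's sketch; base_nca and packed_update_raw are placeholders):

    const FileSys::PatchManager pm{title_id, filesystem_controller, content_provider};
    auto patched = pm.PatchRomFS(base_nca.get(), base_nca->GetRomFS(),
                                 FileSys::ContentRecordType::Program, packed_update_raw);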
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp
index a6960170c..a28af3594 100644
--- a/src/core/file_sys/registered_cache.cpp
+++ b/src/core/file_sys/registered_cache.cpp
@@ -416,9 +416,9 @@ void RegisteredCache::ProcessFiles(const std::vector<NcaID>& ids) {
if (file == nullptr)
continue;
- const auto nca = std::make_shared<NCA>(parser(file, id), nullptr, 0);
+ const auto nca = std::make_shared<NCA>(parser(file, id));
if (nca->GetStatus() != Loader::ResultStatus::Success ||
- nca->GetType() != NCAContentType::Meta) {
+ nca->GetType() != NCAContentType::Meta || nca->GetSubdirectories().empty()) {
continue;
}
@@ -500,7 +500,7 @@ std::unique_ptr<NCA> RegisteredCache::GetEntry(u64 title_id, ContentRecordType t
const auto raw = GetEntryRaw(title_id, type);
if (raw == nullptr)
return nullptr;
- return std::make_unique<NCA>(raw, nullptr, 0);
+ return std::make_unique<NCA>(raw);
}
template <typename T>
@@ -964,7 +964,7 @@ std::unique_ptr<NCA> ManualContentProvider::GetEntry(u64 title_id, ContentRecord
const auto res = GetEntryRaw(title_id, type);
if (res == nullptr)
return nullptr;
- return std::make_unique<NCA>(res, nullptr, 0);
+ return std::make_unique<NCA>(res);
}
std::vector<ContentProviderEntry> ManualContentProvider::ListEntriesFilter(
diff --git a/src/core/file_sys/romfs_factory.cpp b/src/core/file_sys/romfs_factory.cpp
index aa4726cfa..1bc07dae5 100644
--- a/src/core/file_sys/romfs_factory.cpp
+++ b/src/core/file_sys/romfs_factory.cpp
@@ -26,13 +26,12 @@ RomFSFactory::RomFSFactory(Loader::AppLoader& app_loader, ContentProvider& provi
}
updatable = app_loader.IsRomFSUpdatable();
- ivfc_offset = app_loader.ReadRomFSIVFCOffset();
}
RomFSFactory::~RomFSFactory() = default;
void RomFSFactory::SetPackedUpdate(VirtualFile update_raw_file) {
- update_raw = std::move(update_raw_file);
+ packed_update_raw = std::move(update_raw_file);
}
VirtualFile RomFSFactory::OpenCurrentProcess(u64 current_process_title_id) const {
@@ -40,9 +39,11 @@ VirtualFile RomFSFactory::OpenCurrentProcess(u64 current_process_title_id) const
return file;
}
+ const auto type = ContentRecordType::Program;
+ const auto nca = content_provider.GetEntry(current_process_title_id, type);
const PatchManager patch_manager{current_process_title_id, filesystem_controller,
content_provider};
- return patch_manager.PatchRomFS(file, ivfc_offset, ContentRecordType::Program, update_raw);
+ return patch_manager.PatchRomFS(nca.get(), file, ContentRecordType::Program, packed_update_raw);
}
VirtualFile RomFSFactory::OpenPatchedRomFS(u64 title_id, ContentRecordType type) const {
@@ -54,7 +55,7 @@ VirtualFile RomFSFactory::OpenPatchedRomFS(u64 title_id, ContentRecordType type)
const PatchManager patch_manager{title_id, filesystem_controller, content_provider};
- return patch_manager.PatchRomFS(nca->GetRomFS(), nca->GetBaseIVFCOffset(), type);
+ return patch_manager.PatchRomFS(nca.get(), nca->GetRomFS(), type);
}
VirtualFile RomFSFactory::OpenPatchedRomFSWithProgramIndex(u64 title_id, u8 program_index,
diff --git a/src/core/file_sys/romfs_factory.h b/src/core/file_sys/romfs_factory.h
index 7ec40d19d..e4809bc94 100644
--- a/src/core/file_sys/romfs_factory.h
+++ b/src/core/file_sys/romfs_factory.h
@@ -40,21 +40,22 @@ public:
Service::FileSystem::FileSystemController& controller);
~RomFSFactory();
- void SetPackedUpdate(VirtualFile update_raw_file);
+ void SetPackedUpdate(VirtualFile packed_update_raw);
[[nodiscard]] VirtualFile OpenCurrentProcess(u64 current_process_title_id) const;
[[nodiscard]] VirtualFile OpenPatchedRomFS(u64 title_id, ContentRecordType type) const;
[[nodiscard]] VirtualFile OpenPatchedRomFSWithProgramIndex(u64 title_id, u8 program_index,
ContentRecordType type) const;
[[nodiscard]] VirtualFile Open(u64 title_id, StorageId storage, ContentRecordType type) const;
-
-private:
[[nodiscard]] std::shared_ptr<NCA> GetEntry(u64 title_id, StorageId storage,
ContentRecordType type) const;
+private:
VirtualFile file;
- VirtualFile update_raw;
+ VirtualFile packed_update_raw;
+
+ VirtualFile base;
+
bool updatable;
- u64 ivfc_offset;
ContentProvider& content_provider;
Service::FileSystem::FileSystemController& filesystem_controller;
diff --git a/src/core/file_sys/submission_package.cpp b/src/core/file_sys/submission_package.cpp
index c90e6e372..e1e89ce2d 100644
--- a/src/core/file_sys/submission_package.cpp
+++ b/src/core/file_sys/submission_package.cpp
@@ -249,7 +249,7 @@ void NSP::ReadNCAs(const std::vector<VirtualFile>& files) {
}
const auto nca = std::make_shared<NCA>(outer_file);
- if (nca->GetStatus() != Loader::ResultStatus::Success) {
+ if (nca->GetStatus() != Loader::ResultStatus::Success || nca->GetSubdirectories().empty()) {
program_status[nca->GetTitleId()] = nca->GetStatus();
continue;
}
@@ -280,7 +280,7 @@ void NSP::ReadNCAs(const std::vector<VirtualFile>& files) {
continue;
}
- auto next_nca = std::make_shared<NCA>(std::move(next_file), nullptr, 0);
+ auto next_nca = std::make_shared<NCA>(std::move(next_file));
if (next_nca->GetType() == NCAContentType::Program) {
program_status[next_nca->GetTitleId()] = next_nca->GetStatus();
diff --git a/src/core/hle/kernel/k_hardware_timer.h b/src/core/hle/kernel/k_hardware_timer.h
index 00bef6ea1..27f43cd19 100644
--- a/src/core/hle/kernel/k_hardware_timer.h
+++ b/src/core/hle/kernel/k_hardware_timer.h
@@ -19,13 +19,7 @@ public:
void Initialize();
void Finalize();
- s64 GetCount() const {
- return GetTick();
- }
-
- void RegisterTask(KTimerTask* task, s64 time_from_now) {
- this->RegisterAbsoluteTask(task, GetTick() + time_from_now);
- }
+ s64 GetTick() const;
void RegisterAbsoluteTask(KTimerTask* task, s64 task_time) {
KScopedDisableDispatch dd{m_kernel};
@@ -42,7 +36,6 @@ private:
void EnableInterrupt(s64 wakeup_time);
void DisableInterrupt();
bool GetInterruptEnabled();
- s64 GetTick() const;
void DoTask();
private:
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index fcee26a29..d8a63aaf8 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -5,6 +5,7 @@
#include "common/overflow.h"
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/hle/kernel/k_hardware_timer.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/svc_results.h"
@@ -15,9 +16,7 @@ KResourceLimit::KResourceLimit(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock{m_kernel}, m_cond_var{m_kernel} {}
KResourceLimit::~KResourceLimit() = default;
-void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing) {
- m_core_timing = core_timing;
-}
+void KResourceLimit::Initialize() {}
void KResourceLimit::Finalize() {}
@@ -86,7 +85,7 @@ Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
}
bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
- return Reserve(which, value, m_core_timing->GetGlobalTimeNs().count() + DefaultTimeout);
+ return Reserve(which, value, m_kernel.HardwareTimer().GetTick() + DefaultTimeout);
}
bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
@@ -117,7 +116,7 @@ bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
}
if (m_current_hints[index] + value <= m_limit_values[index] &&
- (timeout < 0 || m_core_timing->GetGlobalTimeNs().count() < timeout)) {
+ (timeout < 0 || m_kernel.HardwareTimer().GetTick() < timeout)) {
m_waiter_count++;
m_cond_var.Wait(std::addressof(m_lock), timeout, false);
m_waiter_count--;
@@ -154,7 +153,7 @@ void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) {
KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size) {
auto* resource_limit = KResourceLimit::Create(system.Kernel());
- resource_limit->Initialize(std::addressof(system.CoreTiming()));
+ resource_limit->Initialize();
// Initialize default resource limit values.
// TODO(bunnei): These values are the system defaults, the limits for service processes are
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
index 15e69af56..b733ec8f8 100644
--- a/src/core/hle/kernel/k_resource_limit.h
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -31,7 +31,7 @@ public:
explicit KResourceLimit(KernelCore& kernel);
~KResourceLimit() override;
- void Initialize(const Core::Timing::CoreTiming* core_timing);
+ void Initialize();
void Finalize() override;
s64 GetLimitValue(LimitableResource which) const;
@@ -57,7 +57,6 @@ private:
mutable KLightLock m_lock;
s32 m_waiter_count{};
KLightConditionVariable m_cond_var;
- const Core::Timing::CoreTiming* m_core_timing{};
};
KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size);
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index c485022f5..b62415da7 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -28,7 +28,7 @@ public:
~KScopedSchedulerLockAndSleep() {
// Register the sleep.
if (m_timeout_tick > 0) {
- m_timer->RegisterTask(m_thread, m_timeout_tick);
+ m_timer->RegisterAbsoluteTask(m_thread, m_timeout_tick);
}
// Unlock the scheduler.
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index ebe7582c6..a1134b7e2 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -231,7 +231,7 @@ struct KernelCore::Impl {
void InitializeSystemResourceLimit(KernelCore& kernel,
const Core::Timing::CoreTiming& core_timing) {
system_resource_limit = KResourceLimit::Create(system.Kernel());
- system_resource_limit->Initialize(&core_timing);
+ system_resource_limit->Initialize();
KResourceLimit::Register(kernel, system_resource_limit);
const auto sizes{memory_layout->GetTotalAndKernelMemorySizes()};
diff --git a/src/core/hle/kernel/svc/svc_address_arbiter.cpp b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
index 04cc5ea64..90ee43521 100644
--- a/src/core/hle/kernel/svc/svc_address_arbiter.cpp
+++ b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
+#include "core/hle/kernel/k_hardware_timer.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/kernel.h"
@@ -52,7 +53,7 @@ Result WaitForAddress(Core::System& system, u64 address, ArbitrationType arb_typ
if (timeout_ns > 0) {
const s64 offset_tick(timeout_ns);
if (offset_tick > 0) {
- timeout = offset_tick + 2;
+ timeout = system.Kernel().HardwareTimer().GetTick() + offset_tick + 2;
if (timeout <= 0) {
timeout = std::numeric_limits<s64>::max();
}
diff --git a/src/core/hle/kernel/svc/svc_condition_variable.cpp b/src/core/hle/kernel/svc/svc_condition_variable.cpp
index ca120d67e..bb678e6c5 100644
--- a/src/core/hle/kernel/svc/svc_condition_variable.cpp
+++ b/src/core/hle/kernel/svc/svc_condition_variable.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
+#include "core/hle/kernel/k_hardware_timer.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/kernel.h"
@@ -25,7 +26,7 @@ Result WaitProcessWideKeyAtomic(Core::System& system, u64 address, u64 cv_key, u
if (timeout_ns > 0) {
const s64 offset_tick(timeout_ns);
if (offset_tick > 0) {
- timeout = offset_tick + 2;
+ timeout = system.Kernel().HardwareTimer().GetTick() + offset_tick + 2;
if (timeout <= 0) {
timeout = std::numeric_limits<s64>::max();
}
diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp
index 373ae7c8d..6b5e1cb8d 100644
--- a/src/core/hle/kernel/svc/svc_ipc.cpp
+++ b/src/core/hle/kernel/svc/svc_ipc.cpp
@@ -5,6 +5,7 @@
#include "common/scratch_buffer.h"
#include "core/core.h"
#include "core/hle/kernel/k_client_session.h"
+#include "core/hle/kernel/k_hardware_timer.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/svc.h"
@@ -82,12 +83,29 @@ Result ReplyAndReceive(Core::System& system, s32* out_index, uint64_t handles_ad
R_TRY(session->SendReply());
}
+ // Convert the timeout from nanoseconds to ticks.
+ // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
+ s64 timeout;
+ if (timeout_ns > 0) {
+ const s64 offset_tick(timeout_ns);
+ if (offset_tick > 0) {
+ timeout = kernel.HardwareTimer().GetTick() + offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = timeout_ns;
+ }
+
// Wait for a message.
while (true) {
// Wait for an object.
s32 index;
Result result = KSynchronizationObject::Wait(kernel, std::addressof(index), objs.data(),
- num_handles, timeout_ns);
+ num_handles, timeout);
if (result == ResultTimedOut) {
R_RETURN(result);
}
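
The same nanoseconds-to-absolute-tick conversion appears in the address-arbiter and condition-variable SVCs above, in ReplyAndReceive here, and in SleepThread below. A hypothetical helper that mirrors that pattern, shown only to make the arithmetic explicit (editor's sketch, not introduced by this change):

    // Sketch only; mirrors the conversion used in ReplyAndReceive and SleepThread.
    s64 ConvertTimeoutNsToAbsoluteTick(Kernel::KernelCore& kernel, s64 timeout_ns) {
        if (timeout_ns <= 0) {
            return timeout_ns;  // zero and negative timeouts pass through unchanged
        }
        s64 timeout = kernel.HardwareTimer().GetTick() + timeout_ns + 2;
        if (timeout <= 0) {
            timeout = std::numeric_limits<s64>::max();  // clamp signed overflow to "wait forever"
        }
        return timeout;
    }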
diff --git a/src/core/hle/kernel/svc/svc_resource_limit.cpp b/src/core/hle/kernel/svc/svc_resource_limit.cpp
index 732bc017e..c8e820b6a 100644
--- a/src/core/hle/kernel/svc/svc_resource_limit.cpp
+++ b/src/core/hle/kernel/svc/svc_resource_limit.cpp
@@ -21,7 +21,7 @@ Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
SCOPE_EXIT({ resource_limit->Close(); });
// Initialize the resource limit.
- resource_limit->Initialize(std::addressof(system.CoreTiming()));
+ resource_limit->Initialize();
// Register the limit.
KResourceLimit::Register(kernel, resource_limit);
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 366e8ed4a..8ebc1bd1c 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -4,6 +4,7 @@
#include "common/scope_exit.h"
#include "common/scratch_buffer.h"
#include "core/core.h"
+#include "core/hle/kernel/k_hardware_timer.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/svc.h"
@@ -83,9 +84,20 @@ Result WaitSynchronization(Core::System& system, int32_t* out_index, u64 user_ha
}
});
+ // Convert the timeout from nanoseconds to ticks.
+ s64 timeout;
+ if (timeout_ns > 0) {
+ u64 ticks = kernel.HardwareTimer().GetTick();
+ ticks += timeout_ns;
+ ticks += 2;
+
+ timeout = ticks;
+ } else {
+ timeout = timeout_ns;
+ }
+
// Wait on the objects.
- Result res =
- KSynchronizationObject::Wait(kernel, out_index, objs.data(), num_handles, timeout_ns);
+ Result res = KSynchronizationObject::Wait(kernel, out_index, objs.data(), num_handles, timeout);
R_SUCCEED_IF(res == ResultSessionClosed);
R_RETURN(res);
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index 92bcea72b..933b82e30 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -4,6 +4,7 @@
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/hle/kernel/k_hardware_timer.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_thread.h"
@@ -42,9 +43,9 @@ Result CreateThread(Core::System& system, Handle* out_handle, u64 entry_point, u
R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority);
// Reserve a new thread from the process resource limit (waiting up to 100ms).
- KScopedResourceReservation thread_reservation(
- std::addressof(process), LimitableResource::ThreadCountMax, 1,
- system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
+ KScopedResourceReservation thread_reservation(std::addressof(process),
+ LimitableResource::ThreadCountMax, 1,
+ kernel.HardwareTimer().GetTick() + 100000000);
R_UNLESS(thread_reservation.Succeeded(), ResultLimitReached);
// Create the thread.
@@ -102,20 +103,31 @@ void ExitThread(Core::System& system) {
}
/// Sleep the current thread
-void SleepThread(Core::System& system, s64 nanoseconds) {
+void SleepThread(Core::System& system, s64 ns) {
auto& kernel = system.Kernel();
- const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);
+ const auto yield_type = static_cast<Svc::YieldType>(ns);
- LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
+ LOG_TRACE(Kernel_SVC, "called nanoseconds={}", ns);
// When the input tick is positive, sleep.
- if (nanoseconds > 0) {
+ if (ns > 0) {
// Convert the timeout from nanoseconds to ticks.
// NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
+ s64 timeout;
+
+ const s64 offset_tick(ns);
+ if (offset_tick > 0) {
+ timeout = kernel.HardwareTimer().GetTick() + offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
+ }
// Sleep.
// NOTE: Nintendo does not check the result of this sleep.
- static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
+ static_cast<void>(GetCurrentThread(kernel).Sleep(timeout));
} else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
KScheduler::YieldWithoutCoreMigration(kernel);
} else if (yield_type == Svc::YieldType::WithCoreMigration) {
@@ -124,7 +136,6 @@ void SleepThread(Core::System& system, s64 nanoseconds) {
KScheduler::YieldToAnyThread(kernel);
} else {
// Nintendo does nothing at all if an otherwise invalid value is passed.
- ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
}
diff --git a/src/core/hle/service/am/applets/applet_web_browser.cpp b/src/core/hle/service/am/applets/applet_web_browser.cpp
index 2accf7898..1c9a1dc29 100644
--- a/src/core/hle/service/am/applets/applet_web_browser.cpp
+++ b/src/core/hle/service/am/applets/applet_web_browser.cpp
@@ -139,7 +139,7 @@ FileSys::VirtualFile GetOfflineRomFS(Core::System& system, u64 title_id,
const FileSys::PatchManager pm{title_id, system.GetFileSystemController(),
system.GetContentProvider()};
- return pm.PatchRomFS(nca->GetRomFS(), nca->GetBaseIVFCOffset(), nca_type);
+ return pm.PatchRomFS(nca.get(), nca->GetRomFS(), nca_type);
}
}
diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp
index ac465d5a9..4c1ea1a5b 100644
--- a/src/core/hle/service/filesystem/filesystem.cpp
+++ b/src/core/hle/service/filesystem/filesystem.cpp
@@ -373,6 +373,11 @@ FileSys::VirtualFile FileSystemController::OpenRomFS(u64 title_id, FileSys::Stor
return romfs_factory->Open(title_id, storage_id, type);
}
+std::shared_ptr<FileSys::NCA> FileSystemController::OpenBaseNca(
+ u64 title_id, FileSys::StorageId storage_id, FileSys::ContentRecordType type) const {
+ return romfs_factory->GetEntry(title_id, storage_id, type);
+}
+
Result FileSystemController::CreateSaveData(FileSys::VirtualDir* out_save_data,
FileSys::SaveDataSpaceId space,
const FileSys::SaveDataAttribute& save_struct) const {
diff --git a/src/core/hle/service/filesystem/filesystem.h b/src/core/hle/service/filesystem/filesystem.h
index fd991f976..e7e7c4c28 100644
--- a/src/core/hle/service/filesystem/filesystem.h
+++ b/src/core/hle/service/filesystem/filesystem.h
@@ -15,6 +15,7 @@ class System;
namespace FileSys {
class BISFactory;
+class NCA;
class RegisteredCache;
class RegisteredCacheUnion;
class PlaceholderCache;
@@ -70,6 +71,8 @@ public:
FileSys::ContentRecordType type) const;
FileSys::VirtualFile OpenRomFS(u64 title_id, FileSys::StorageId storage_id,
FileSys::ContentRecordType type) const;
+ std::shared_ptr<FileSys::NCA> OpenBaseNca(u64 title_id, FileSys::StorageId storage_id,
+ FileSys::ContentRecordType type) const;
Result CreateSaveData(FileSys::VirtualDir* out_save_data, FileSys::SaveDataSpaceId space,
const FileSys::SaveDataAttribute& save_struct) const;
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index 423a814cb..6e4d26b1e 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -1029,8 +1029,9 @@ void FSP_SRV::OpenDataStorageByDataId(HLERequestContext& ctx) {
const FileSys::PatchManager pm{title_id, fsc, content_provider};
+ auto base = fsc.OpenBaseNca(title_id, storage_id, FileSys::ContentRecordType::Data);
auto storage = std::make_shared<IStorage>(
- system, pm.PatchRomFS(std::move(data), 0, FileSys::ContentRecordType::Data));
+ system, pm.PatchRomFS(base.get(), std::move(data), FileSys::ContentRecordType::Data));
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);
diff --git a/src/core/loader/loader.cpp b/src/core/loader/loader.cpp
index f24474ed8..07c65dc1a 100644
--- a/src/core/loader/loader.cpp
+++ b/src/core/loader/loader.cpp
@@ -135,7 +135,7 @@ constexpr std::array<const char*, 66> RESULT_MESSAGES{
"The titlekey and/or titlekek is incorrect or the section header is invalid.",
"The XCI file is missing a Program-type NCA.",
"The NCA file is not an application.",
- "The ExeFS partition could not be found.",
+ "The Program-type NCA contains no executable. An update may be required.",
"The XCI file has a bad header.",
"The XCI file is missing a partition.",
"The file could not be found or does not exist.",
@@ -169,7 +169,7 @@ constexpr std::array<const char*, 66> RESULT_MESSAGES{
"The BKTR-type NCA has a bad Subsection block.",
"The BKTR-type NCA has a bad Relocation bucket.",
"The BKTR-type NCA has a bad Subsection bucket.",
- "The BKTR-type NCA is missing the base RomFS.",
+ "Game updates cannot be loaded directly. Load the base game instead.",
"The NSP or XCI does not contain an update in addition to the base game.",
"The KIP file has a bad header.",
"The KIP BLZ decompression of the section failed unexpectedly.",
diff --git a/src/core/loader/loader.h b/src/core/loader/loader.h
index 7a2a52fd4..721eb8e8c 100644
--- a/src/core/loader/loader.h
+++ b/src/core/loader/loader.h
@@ -79,8 +79,6 @@ enum class ResultStatus : u16 {
ErrorBadPFSHeader,
ErrorIncorrectPFSFileSize,
ErrorBadNCAHeader,
- ErrorCompressedNCA,
- ErrorSparseNCA,
ErrorMissingProductionKeyFile,
ErrorMissingHeaderKey,
ErrorIncorrectHeaderKey,
@@ -276,16 +274,6 @@ public:
}
/**
- * Gets the difference between the start of the IVFC header and the start of level 6 (RomFS)
- * data. Needed for BKTR patching.
- *
- * @return IVFC offset for RomFS.
- */
- virtual u64 ReadRomFSIVFCOffset() const {
- return 0;
- }
-
- /**
* Get the title of the application
*
* @param[out] title Reference to store the application title into
diff --git a/src/core/loader/nax.cpp b/src/core/loader/nax.cpp
index cf35b1249..3b7b005ff 100644
--- a/src/core/loader/nax.cpp
+++ b/src/core/loader/nax.cpp
@@ -76,10 +76,6 @@ ResultStatus AppLoader_NAX::ReadRomFS(FileSys::VirtualFile& dir) {
return nca_loader->ReadRomFS(dir);
}
-u64 AppLoader_NAX::ReadRomFSIVFCOffset() const {
- return nca_loader->ReadRomFSIVFCOffset();
-}
-
ResultStatus AppLoader_NAX::ReadProgramId(u64& out_program_id) {
return nca_loader->ReadProgramId(out_program_id);
}
diff --git a/src/core/loader/nax.h b/src/core/loader/nax.h
index d7f70db43..81df2bbcd 100644
--- a/src/core/loader/nax.h
+++ b/src/core/loader/nax.h
@@ -39,7 +39,6 @@ public:
LoadResult Load(Kernel::KProcess& process, Core::System& system) override;
ResultStatus ReadRomFS(FileSys::VirtualFile& dir) override;
- u64 ReadRomFSIVFCOffset() const override;
ResultStatus ReadProgramId(u64& out_program_id) override;
ResultStatus ReadBanner(std::vector<u8>& buffer) override;
diff --git a/src/core/loader/nca.cpp b/src/core/loader/nca.cpp
index 513af194d..09d40e695 100644
--- a/src/core/loader/nca.cpp
+++ b/src/core/loader/nca.cpp
@@ -5,6 +5,8 @@
#include "core/core.h"
#include "core/file_sys/content_archive.h"
+#include "core/file_sys/nca_metadata.h"
+#include "core/file_sys/registered_cache.h"
#include "core/file_sys/romfs_factory.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/filesystem/filesystem.h"
@@ -43,9 +45,23 @@ AppLoader_NCA::LoadResult AppLoader_NCA::Load(Kernel::KProcess& process, Core::S
return {ResultStatus::ErrorNCANotProgram, {}};
}
- const auto exefs = nca->GetExeFS();
+ auto exefs = nca->GetExeFS();
if (exefs == nullptr) {
- return {ResultStatus::ErrorNoExeFS, {}};
+ LOG_INFO(Loader, "No ExeFS found in NCA, looking for ExeFS from update");
+
+ // This NCA may be a sparse base of an installed title.
+ // Try to fetch the ExeFS from the installed update.
+ const auto& installed = system.GetContentProvider();
+ const auto update_nca = installed.GetEntry(FileSys::GetUpdateTitleID(nca->GetTitleId()),
+ FileSys::ContentRecordType::Program);
+
+ if (update_nca) {
+ exefs = update_nca->GetExeFS();
+ }
+
+ if (exefs == nullptr) {
+ return {ResultStatus::ErrorNoExeFS, {}};
+ }
}
directory_loader = std::make_unique<AppLoader_DeconstructedRomDirectory>(exefs, true);
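The fallback above leans on the base/update title-ID relationship: sparse base NCAs ship without an ExeFS, so the loader asks the installed content provider for the Program NCA of the matching update and borrows its ExeFS. A minimal host-side sketch of that ID mapping, assuming the usual convention that an update's title ID is the base program ID with 0x800 OR'd in (the helper name below is illustrative, not yuzu's FileSys::GetUpdateTitleID itself):

    // Sketch only: the base -> update title-ID convention the lookup relies on.
    #include <cstdint>

    constexpr std::uint64_t UpdateTitleId(std::uint64_t base_program_id) {
        return base_program_id | 0x800; // update IDs set bit 11 of the base ID
    }

    // e.g. a hypothetical base ID 0x0100ABCD00000000 maps to its update ID:
    static_assert(UpdateTitleId(0x0100ABCD00000000ULL) == 0x0100ABCD00000800ULL);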
@@ -77,14 +93,6 @@ ResultStatus AppLoader_NCA::ReadRomFS(FileSys::VirtualFile& dir) {
return ResultStatus::Success;
}
-u64 AppLoader_NCA::ReadRomFSIVFCOffset() const {
- if (nca == nullptr) {
- return 0;
- }
-
- return nca->GetBaseIVFCOffset();
-}
-
ResultStatus AppLoader_NCA::ReadProgramId(u64& out_program_id) {
if (nca == nullptr || nca->GetStatus() != ResultStatus::Success) {
return ResultStatus::ErrorNotInitialized;
diff --git a/src/core/loader/nca.h b/src/core/loader/nca.h
index d22d9146e..cf356ce63 100644
--- a/src/core/loader/nca.h
+++ b/src/core/loader/nca.h
@@ -40,7 +40,6 @@ public:
LoadResult Load(Kernel::KProcess& process, Core::System& system) override;
ResultStatus ReadRomFS(FileSys::VirtualFile& dir) override;
- u64 ReadRomFSIVFCOffset() const override;
ResultStatus ReadProgramId(u64& out_program_id) override;
ResultStatus ReadBanner(std::vector<u8>& buffer) override;
diff --git a/src/core/loader/nsp.cpp b/src/core/loader/nsp.cpp
index 80663e0e0..f9b2549a3 100644
--- a/src/core/loader/nsp.cpp
+++ b/src/core/loader/nsp.cpp
@@ -121,10 +121,6 @@ ResultStatus AppLoader_NSP::ReadRomFS(FileSys::VirtualFile& out_file) {
return secondary_loader->ReadRomFS(out_file);
}
-u64 AppLoader_NSP::ReadRomFSIVFCOffset() const {
- return secondary_loader->ReadRomFSIVFCOffset();
-}
-
ResultStatus AppLoader_NSP::ReadUpdateRaw(FileSys::VirtualFile& out_file) {
if (nsp->IsExtractedType()) {
return ResultStatus::ErrorNoPackedUpdate;
diff --git a/src/core/loader/nsp.h b/src/core/loader/nsp.h
index 003cc345c..79df4586a 100644
--- a/src/core/loader/nsp.h
+++ b/src/core/loader/nsp.h
@@ -46,7 +46,6 @@ public:
LoadResult Load(Kernel::KProcess& process, Core::System& system) override;
ResultStatus ReadRomFS(FileSys::VirtualFile& out_file) override;
- u64 ReadRomFSIVFCOffset() const override;
ResultStatus ReadUpdateRaw(FileSys::VirtualFile& out_file) override;
ResultStatus ReadProgramId(u64& out_program_id) override;
ResultStatus ReadProgramIds(std::vector<u64>& out_program_ids) override;
diff --git a/src/core/loader/xci.cpp b/src/core/loader/xci.cpp
index c7b1b3815..3a76bc788 100644
--- a/src/core/loader/xci.cpp
+++ b/src/core/loader/xci.cpp
@@ -89,10 +89,6 @@ ResultStatus AppLoader_XCI::ReadRomFS(FileSys::VirtualFile& out_file) {
return nca_loader->ReadRomFS(out_file);
}
-u64 AppLoader_XCI::ReadRomFSIVFCOffset() const {
- return nca_loader->ReadRomFSIVFCOffset();
-}
-
ResultStatus AppLoader_XCI::ReadUpdateRaw(FileSys::VirtualFile& out_file) {
u64 program_id{};
nca_loader->ReadProgramId(program_id);
diff --git a/src/core/loader/xci.h b/src/core/loader/xci.h
index 2affb6c6e..ff05e6f62 100644
--- a/src/core/loader/xci.h
+++ b/src/core/loader/xci.h
@@ -46,7 +46,6 @@ public:
LoadResult Load(Kernel::KProcess& process, Core::System& system) override;
ResultStatus ReadRomFS(FileSys::VirtualFile& out_file) override;
- u64 ReadRomFSIVFCOffset() const override;
ResultStatus ReadUpdateRaw(FileSys::VirtualFile& out_file) override;
ResultStatus ReadProgramId(u64& out_program_id) override;
ResultStatus ReadProgramIds(std::vector<u64>& out_program_ids) override;
diff --git a/src/video_core/host_shaders/CMakeLists.txt b/src/video_core/host_shaders/CMakeLists.txt
index e61d9af80..c4d459077 100644
--- a/src/video_core/host_shaders/CMakeLists.txt
+++ b/src/video_core/host_shaders/CMakeLists.txt
@@ -50,6 +50,7 @@ set(SHADER_FILES
vulkan_blit_depth_stencil.frag
vulkan_color_clear.frag
vulkan_color_clear.vert
+ vulkan_depthstencil_clear.frag
vulkan_fidelityfx_fsr_easu_fp16.comp
vulkan_fidelityfx_fsr_easu_fp32.comp
vulkan_fidelityfx_fsr_rcas_fp16.comp
diff --git a/src/video_core/host_shaders/astc_decoder.comp b/src/video_core/host_shaders/astc_decoder.comp
index bf2693559..5ff17cd0c 100644
--- a/src/video_core/host_shaders/astc_decoder.comp
+++ b/src/video_core/host_shaders/astc_decoder.comp
@@ -33,26 +33,14 @@ UNIFORM(6) uint block_height_mask;
END_PUSH_CONSTANTS
struct EncodingData {
- uint encoding;
- uint num_bits;
- uint bit_value;
- uint quint_trit_value;
+ uint data;
};
-struct TexelWeightParams {
- uvec2 size;
- uint max_weight;
- bool dual_plane;
- bool error_state;
- bool void_extent_ldr;
- bool void_extent_hdr;
-};
-
-layout(binding = BINDING_INPUT_BUFFER, std430) readonly buffer InputBufferU32 {
+layout(binding = BINDING_INPUT_BUFFER, std430) readonly restrict buffer InputBufferU32 {
uvec4 astc_data[];
};
-layout(binding = BINDING_OUTPUT_IMAGE, rgba8) uniform writeonly image2DArray dest_image;
+layout(binding = BINDING_OUTPUT_IMAGE, rgba8) uniform writeonly restrict image2DArray dest_image;
const uint GOB_SIZE_X_SHIFT = 6;
const uint GOB_SIZE_Y_SHIFT = 3;
@@ -60,64 +48,21 @@ const uint GOB_SIZE_SHIFT = GOB_SIZE_X_SHIFT + GOB_SIZE_Y_SHIFT;
const uint BYTES_PER_BLOCK_LOG2 = 4;
-const int JUST_BITS = 0;
-const int QUINT = 1;
-const int TRIT = 2;
+const uint JUST_BITS = 0u;
+const uint QUINT = 1u;
+const uint TRIT = 2u;
// ASTC Encodings data, sorted in ascending order based on their BitLength value
// (see GetBitLength() function)
-EncodingData encoding_values[22] = EncodingData[](
- EncodingData(JUST_BITS, 0, 0, 0), EncodingData(JUST_BITS, 1, 0, 0), EncodingData(TRIT, 0, 0, 0),
- EncodingData(JUST_BITS, 2, 0, 0), EncodingData(QUINT, 0, 0, 0), EncodingData(TRIT, 1, 0, 0),
- EncodingData(JUST_BITS, 3, 0, 0), EncodingData(QUINT, 1, 0, 0), EncodingData(TRIT, 2, 0, 0),
- EncodingData(JUST_BITS, 4, 0, 0), EncodingData(QUINT, 2, 0, 0), EncodingData(TRIT, 3, 0, 0),
- EncodingData(JUST_BITS, 5, 0, 0), EncodingData(QUINT, 3, 0, 0), EncodingData(TRIT, 4, 0, 0),
- EncodingData(JUST_BITS, 6, 0, 0), EncodingData(QUINT, 4, 0, 0), EncodingData(TRIT, 5, 0, 0),
- EncodingData(JUST_BITS, 7, 0, 0), EncodingData(QUINT, 5, 0, 0), EncodingData(TRIT, 6, 0, 0),
- EncodingData(JUST_BITS, 8, 0, 0)
-);
-
-// The following constants are expanded variants of the Replicate()
-// function calls corresponding to the following arguments:
-// value: index into the generated table
-// num_bits: the after "REPLICATE" in the table name. i.e. 4 is num_bits in REPLICATE_4.
-// to_bit: the integer after "TO_"
-const uint REPLICATE_BIT_TO_7_TABLE[2] = uint[](0, 127);
-const uint REPLICATE_1_BIT_TO_9_TABLE[2] = uint[](0, 511);
-
-const uint REPLICATE_1_BIT_TO_8_TABLE[2] = uint[](0, 255);
-const uint REPLICATE_2_BIT_TO_8_TABLE[4] = uint[](0, 85, 170, 255);
-const uint REPLICATE_3_BIT_TO_8_TABLE[8] = uint[](0, 36, 73, 109, 146, 182, 219, 255);
-const uint REPLICATE_4_BIT_TO_8_TABLE[16] =
- uint[](0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255);
-const uint REPLICATE_5_BIT_TO_8_TABLE[32] =
- uint[](0, 8, 16, 24, 33, 41, 49, 57, 66, 74, 82, 90, 99, 107, 115, 123, 132, 140, 148, 156, 165,
- 173, 181, 189, 198, 206, 214, 222, 231, 239, 247, 255);
-const uint REPLICATE_1_BIT_TO_6_TABLE[2] = uint[](0, 63);
-const uint REPLICATE_2_BIT_TO_6_TABLE[4] = uint[](0, 21, 42, 63);
-const uint REPLICATE_3_BIT_TO_6_TABLE[8] = uint[](0, 9, 18, 27, 36, 45, 54, 63);
-const uint REPLICATE_4_BIT_TO_6_TABLE[16] =
- uint[](0, 4, 8, 12, 17, 21, 25, 29, 34, 38, 42, 46, 51, 55, 59, 63);
-const uint REPLICATE_5_BIT_TO_6_TABLE[32] =
- uint[](0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 33, 35, 37, 39, 41, 43, 45,
- 47, 49, 51, 53, 55, 57, 59, 61, 63);
-const uint REPLICATE_6_BIT_TO_8_TABLE[64] =
- uint[](0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 65, 69, 73, 77, 81, 85, 89,
- 93, 97, 101, 105, 109, 113, 117, 121, 125, 130, 134, 138, 142, 146, 150, 154, 158, 162,
- 166, 170, 174, 178, 182, 186, 190, 195, 199, 203, 207, 211, 215, 219, 223, 227, 231, 235,
- 239, 243, 247, 251, 255);
-const uint REPLICATE_7_BIT_TO_8_TABLE[128] =
- uint[](0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44,
- 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88,
- 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126,
- 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163,
- 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199,
- 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235,
- 237, 239, 241, 243, 245, 247, 249, 251, 253, 255);
+const uint encoding_values[22] = uint[](
+ (JUST_BITS), (JUST_BITS | (1u << 8u)), (TRIT), (JUST_BITS | (2u << 8u)),
+ (QUINT), (TRIT | (1u << 8u)), (JUST_BITS | (3u << 8u)), (QUINT | (1u << 8u)),
+ (TRIT | (2u << 8u)), (JUST_BITS | (4u << 8u)), (QUINT | (2u << 8u)), (TRIT | (3u << 8u)),
+ (JUST_BITS | (5u << 8u)), (QUINT | (3u << 8u)), (TRIT | (4u << 8u)), (JUST_BITS | (6u << 8u)),
+ (QUINT | (4u << 8u)), (TRIT | (5u << 8u)), (JUST_BITS | (7u << 8u)), (QUINT | (5u << 8u)),
+ (TRIT | (6u << 8u)), (JUST_BITS | (8u << 8u)));
// Input ASTC texture globals
-uint current_index = 0;
-int bitsread = 0;
int total_bitsread = 0;
uvec4 local_buff;
@@ -125,50 +70,60 @@ uvec4 local_buff;
uvec4 color_endpoint_data;
int color_bitsread = 0;
-// Four values, two endpoints, four maximum partitions
-uint color_values[32];
-int colvals_index = 0;
-
-// Weight data globals
-uvec4 texel_weight_data;
-int texel_bitsread = 0;
+// Global "vector" to be pushed into when decoding
+// At most will require BLOCK_WIDTH x BLOCK_HEIGHT in single plane mode
+// At most will require BLOCK_WIDTH x BLOCK_HEIGHT x 2 in dual plane mode
+// So the maximum would be 144 (12 x 12) elements, x 2 for two planes
+#define DIVCEIL(number, divisor) (number + divisor - 1) / divisor
+#define ARRAY_NUM_ELEMENTS 144
+#define VECTOR_ARRAY_SIZE DIVCEIL(ARRAY_NUM_ELEMENTS * 2, 4)
+uint result_vector[ARRAY_NUM_ELEMENTS * 2];
-bool texel_flag = false;
-
-// Global "vectors" to be pushed into when decoding
-EncodingData result_vector[144];
int result_index = 0;
+uint result_vector_max_index;
+bool result_limit_reached = false;
-EncodingData texel_vector[144];
-int texel_vector_index = 0;
+// EncodingData helpers
+uint Encoding(EncodingData val) {
+ return bitfieldExtract(val.data, 0, 8);
+}
+uint NumBits(EncodingData val) {
+ return bitfieldExtract(val.data, 8, 8);
+}
+uint BitValue(EncodingData val) {
+ return bitfieldExtract(val.data, 16, 8);
+}
+uint QuintTritValue(EncodingData val) {
+ return bitfieldExtract(val.data, 24, 8);
+}
-uint unquantized_texel_weights[2][144];
+void Encoding(inout EncodingData val, uint v) {
+ val.data = bitfieldInsert(val.data, v, 0, 8);
+}
+void NumBits(inout EncodingData val, uint v) {
+ val.data = bitfieldInsert(val.data, v, 8, 8);
+}
+void BitValue(inout EncodingData val, uint v) {
+ val.data = bitfieldInsert(val.data, v, 16, 8);
+}
+void QuintTritValue(inout EncodingData val, uint v) {
+ val.data = bitfieldInsert(val.data, v, 24, 8);
+}
-uint SwizzleOffset(uvec2 pos) {
- uint x = pos.x;
- uint y = pos.y;
- return ((x % 64) / 32) * 256 + ((y % 8) / 2) * 64 + ((x % 32) / 16) * 32 +
- (y % 2) * 16 + (x % 16);
+EncodingData CreateEncodingData(uint encoding, uint num_bits, uint bit_val, uint quint_trit_val) {
+ return EncodingData(((encoding) << 0u) | ((num_bits) << 8u) |
+ ((bit_val) << 16u) | ((quint_trit_val) << 24u));
}
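The accessors above fix the packed EncodingData layout introduced by this change: the encoding kind lives in bits 0-7, num_bits in bits 8-15, bit_value in bits 16-23, and the trit/quint digit in bits 24-31, which is also how the new encoding_values table is written (e.g. JUST_BITS | (3u << 8u) means "three plain bits"). A host-side mirror of the packing, offered as a sketch rather than shipped code:

    // Mirrors the GLSL CreateEncodingData()/NumBits() pair above; sketch only.
    #include <cstdint>

    constexpr std::uint32_t Pack(std::uint32_t encoding, std::uint32_t num_bits,
                                 std::uint32_t bit_value, std::uint32_t quint_trit) {
        return (encoding & 0xff) | ((num_bits & 0xff) << 8) |
               ((bit_value & 0xff) << 16) | ((quint_trit & 0xff) << 24);
    }

    constexpr std::uint32_t NumBits(std::uint32_t data) {
        return (data >> 8) & 0xff; // the field bitfieldExtract(data, 8, 8) reads
    }

    static_assert(NumBits(Pack(1u, 3u, 0u, 0u)) == 3u); // i.e. QUINT | (3u << 8u)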
-// Replicates low num_bits such that [(to_bit - 1):(to_bit - 1 - from_bit)]
-// is the same as [(num_bits - 1):0] and repeats all the way down.
-uint Replicate(uint val, uint num_bits, uint to_bit) {
- const uint v = val & uint((1 << num_bits) - 1);
- uint res = v;
- uint reslen = num_bits;
- while (reslen < to_bit) {
- uint comp = 0;
- if (num_bits > to_bit - reslen) {
- uint newshift = to_bit - reslen;
- comp = num_bits - newshift;
- num_bits = newshift;
- }
- res = uint(res << num_bits);
- res = uint(res | (v >> comp));
- reslen += num_bits;
+
+void ResultEmplaceBack(EncodingData val) {
+ if (result_index >= result_vector_max_index) {
+ // Alert callers to avoid decoding more than needed by this phase
+ result_limit_reached = true;
+ return;
}
- return res;
+ result_vector[result_index] = val.data;
+ ++result_index;
}
uvec4 ReplicateByteTo16(uvec4 value) {
@@ -176,64 +131,40 @@ uvec4 ReplicateByteTo16(uvec4 value) {
}
uint ReplicateBitTo7(uint value) {
- return REPLICATE_BIT_TO_7_TABLE[value];
+ return value * 127;
}
uint ReplicateBitTo9(uint value) {
- return REPLICATE_1_BIT_TO_9_TABLE[value];
+ return value * 511;
}
-uint FastReplicate(uint value, uint num_bits, uint to_bit) {
- if (num_bits == 0) {
+uint ReplicateBits(uint value, uint num_bits, uint to_bit) {
+ if (value == 0 || num_bits == 0) {
return 0;
}
- if (num_bits == to_bit) {
+ if (num_bits >= to_bit) {
return value;
}
- if (to_bit == 6) {
- switch (num_bits) {
- case 1:
- return REPLICATE_1_BIT_TO_6_TABLE[value];
- case 2:
- return REPLICATE_2_BIT_TO_6_TABLE[value];
- case 3:
- return REPLICATE_3_BIT_TO_6_TABLE[value];
- case 4:
- return REPLICATE_4_BIT_TO_6_TABLE[value];
- case 5:
- return REPLICATE_5_BIT_TO_6_TABLE[value];
- default:
- break;
- }
- } else { /* if (to_bit == 8) */
- switch (num_bits) {
- case 1:
- return REPLICATE_1_BIT_TO_8_TABLE[value];
- case 2:
- return REPLICATE_2_BIT_TO_8_TABLE[value];
- case 3:
- return REPLICATE_3_BIT_TO_8_TABLE[value];
- case 4:
- return REPLICATE_4_BIT_TO_8_TABLE[value];
- case 5:
- return REPLICATE_5_BIT_TO_8_TABLE[value];
- case 6:
- return REPLICATE_6_BIT_TO_8_TABLE[value];
- case 7:
- return REPLICATE_7_BIT_TO_8_TABLE[value];
- default:
- break;
- }
+ const uint v = value & uint((1 << num_bits) - 1);
+ uint res = v;
+ uint reslen = num_bits;
+ while (reslen < to_bit) {
+ const uint num_dst_bits_to_shift_up = min(num_bits, to_bit - reslen);
+ const uint num_src_bits_to_shift_down = num_bits - num_dst_bits_to_shift_up;
+
+ res <<= num_dst_bits_to_shift_up;
+ res |= (v >> num_src_bits_to_shift_down);
+ reslen += num_bits;
}
- return Replicate(value, num_bits, to_bit);
+ return res;
}
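ReplicateBits() above replaces the REPLICATE_*_TABLE lookups deleted in this hunk: each loop iteration appends another copy of the low num_bits, truncating the final copy so the result ends up exactly to_bit wide. A constexpr copy of the loop, used only to check that it reproduces two of the removed table entries (a sketch, not shipped code):

    #include <algorithm>
    #include <cstdint>

    constexpr std::uint32_t ReplicateBits(std::uint32_t value, std::uint32_t num_bits,
                                          std::uint32_t to_bit) {
        if (value == 0 || num_bits == 0) return 0;
        if (num_bits >= to_bit) return value;
        const std::uint32_t v = value & ((1u << num_bits) - 1u);
        std::uint32_t res = v;
        std::uint32_t reslen = num_bits;
        while (reslen < to_bit) {
            const std::uint32_t up = std::min(num_bits, to_bit - reslen);
            res = (res << up) | (v >> (num_bits - up)); // truncate the last copy
            reslen += num_bits;
        }
        return res;
    }

    static_assert(ReplicateBits(5, 3, 8) == 182); // old REPLICATE_3_BIT_TO_8_TABLE[5]
    static_assert(ReplicateBits(1, 1, 8) == 255); // old REPLICATE_1_BIT_TO_8_TABLE[1]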
uint FastReplicateTo8(uint value, uint num_bits) {
- return FastReplicate(value, num_bits, 8);
+ return ReplicateBits(value, num_bits, 8);
}
uint FastReplicateTo6(uint value, uint num_bits) {
- return FastReplicate(value, num_bits, 6);
+ return ReplicateBits(value, num_bits, 6);
}
uint Div3Floor(uint v) {
@@ -266,15 +197,15 @@ uint Hash52(uint p) {
return p;
}
-uint Select2DPartition(uint seed, uint x, uint y, uint partition_count, bool small_block) {
- if (small_block) {
+uint Select2DPartition(uint seed, uint x, uint y, uint partition_count) {
+ if ((block_dims.y * block_dims.x) < 32) {
x <<= 1;
y <<= 1;
}
seed += (partition_count - 1) * 1024;
- uint rnum = Hash52(uint(seed));
+ const uint rnum = Hash52(uint(seed));
uint seed1 = uint(rnum & 0xF);
uint seed2 = uint((rnum >> 4) & 0xF);
uint seed3 = uint((rnum >> 8) & 0xF);
@@ -342,53 +273,52 @@ uint ExtractBits(uvec4 payload, int offset, int bits) {
if (bits <= 0) {
return 0;
}
- int last_offset = offset + bits - 1;
- int shifted_offset = offset >> 5;
+ if (bits > 32) {
+ return 0;
+ }
+ const int last_offset = offset + bits - 1;
+ const int shifted_offset = offset >> 5;
if ((last_offset >> 5) == shifted_offset) {
return bitfieldExtract(payload[shifted_offset], offset & 31, bits);
}
- int first_bits = 32 - (offset & 31);
- int result_first = int(bitfieldExtract(payload[shifted_offset], offset & 31, first_bits));
- int result_second = int(bitfieldExtract(payload[shifted_offset + 1], 0, bits - first_bits));
+ const int first_bits = 32 - (offset & 31);
+ const int result_first = int(bitfieldExtract(payload[shifted_offset], offset & 31, first_bits));
+ const int result_second = int(bitfieldExtract(payload[shifted_offset + 1], 0, bits - first_bits));
return result_first | (result_second << first_bits);
}
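ExtractBits() above pulls up to 32 bits from an arbitrary bit offset inside the 128-bit uvec4 payload, stitching two words together when the range crosses a 32-bit boundary (bitfieldExtract can only read within a single word). A host-side sketch of the same read done through a 64-bit window; it assumes the requested range stays inside the payload and is for illustration only:

    #include <cstdint>

    std::uint32_t ExtractBits(const std::uint32_t payload[4], int offset, int bits) {
        if (bits <= 0 || bits > 32) {
            return 0;
        }
        const int word = offset >> 5;  // 32-bit word the range starts in
        const int shift = offset & 31; // bit position inside that word
        std::uint64_t window = payload[word];
        if (shift + bits > 32) {
            window |= static_cast<std::uint64_t>(payload[word + 1]) << 32; // straddle
        }
        window >>= shift;
        const std::uint64_t mask = (bits == 32) ? 0xffffffffULL : ((1ULL << bits) - 1);
        return static_cast<std::uint32_t>(window & mask);
    }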
uint StreamBits(uint num_bits) {
- int int_bits = int(num_bits);
- uint ret = ExtractBits(local_buff, total_bitsread, int_bits);
+ const int int_bits = int(num_bits);
+ const uint ret = ExtractBits(local_buff, total_bitsread, int_bits);
total_bitsread += int_bits;
return ret;
}
+void SkipBits(uint num_bits) {
+ const int int_bits = int(num_bits);
+ total_bitsread += int_bits;
+}
+
uint StreamColorBits(uint num_bits) {
- uint ret = 0;
- int int_bits = int(num_bits);
- if (texel_flag) {
- ret = ExtractBits(texel_weight_data, texel_bitsread, int_bits);
- texel_bitsread += int_bits;
- } else {
- ret = ExtractBits(color_endpoint_data, color_bitsread, int_bits);
- color_bitsread += int_bits;
- }
+ const int int_bits = int(num_bits);
+ const uint ret = ExtractBits(color_endpoint_data, color_bitsread, int_bits);
+ color_bitsread += int_bits;
return ret;
}
-void ResultEmplaceBack(EncodingData val) {
- if (texel_flag) {
- texel_vector[texel_vector_index] = val;
- ++texel_vector_index;
- } else {
- result_vector[result_index] = val;
- ++result_index;
- }
+EncodingData GetEncodingFromVector(uint index) {
+ const uint data = result_vector[index];
+ return EncodingData(data);
}
// Returns the number of bits required to encode n_vals values.
uint GetBitLength(uint n_vals, uint encoding_index) {
- uint total_bits = encoding_values[encoding_index].num_bits * n_vals;
- if (encoding_values[encoding_index].encoding == TRIT) {
+ const EncodingData encoding_value = EncodingData(encoding_values[encoding_index]);
+ const uint encoding = Encoding(encoding_value);
+ uint total_bits = NumBits(encoding_value) * n_vals;
+ if (encoding == TRIT) {
total_bits += Div5Ceil(n_vals * 8);
- } else if (encoding_values[encoding_index].encoding == QUINT) {
+ } else if (encoding == QUINT) {
total_bits += Div3Ceil(n_vals * 7);
}
return total_bits;
@@ -403,7 +333,7 @@ uint GetNumWeightValues(uvec2 size, bool dual_plane) {
}
uint GetPackedBitSize(uvec2 size, bool dual_plane, uint max_weight) {
- uint n_vals = GetNumWeightValues(size, dual_plane);
+ const uint n_vals = GetNumWeightValues(size, dual_plane);
return GetBitLength(n_vals, max_weight);
}
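GetBitLength() above is the ASTC integer-sequence-encoding cost: every value spends its plain bits, and trit or quint ranges add a shared block cost because five trits (3^5 = 243) pack into 8 bits and three quints (5^3 = 125) pack into 7 bits, hence the Div5Ceil(n * 8) and Div3Ceil(n * 7) terms. A worked check of that arithmetic (a sketch with local ceil-division helpers, not the shader's):

    #include <cstdint>

    constexpr std::uint32_t Div5Ceil(std::uint32_t x) { return (x + 4) / 5; }
    constexpr std::uint32_t Div3Ceil(std::uint32_t x) { return (x + 2) / 3; }

    constexpr std::uint32_t TritBitLength(std::uint32_t n_vals, std::uint32_t num_bits) {
        return num_bits * n_vals + Div5Ceil(n_vals * 8);
    }
    constexpr std::uint32_t QuintBitLength(std::uint32_t n_vals, std::uint32_t num_bits) {
        return num_bits * n_vals + Div3Ceil(n_vals * 7);
    }

    // 16 weights, trit range with 3 extra bits: 3 * 16 + ceil(128 / 5) = 48 + 26.
    static_assert(TritBitLength(16, 3) == 74);
    // 16 weights, quint range with 2 extra bits: 2 * 16 + ceil(112 / 3) = 32 + 38.
    static_assert(QuintBitLength(16, 2) == 70);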
@@ -412,87 +342,74 @@ uint BitsBracket(uint bits, uint pos) {
}
uint BitsOp(uint bits, uint start, uint end) {
- if (start == end) {
- return BitsBracket(bits, start);
- } else if (start > end) {
- uint t = start;
- start = end;
- end = t;
- }
-
- uint mask = (1 << (end - start + 1)) - 1;
+ const uint mask = (1 << (end - start + 1)) - 1;
return ((bits >> start) & mask);
}
void DecodeQuintBlock(uint num_bits) {
- uint m[3];
- uint q[3];
- uint Q;
+ uvec3 m;
+ uvec4 qQ;
m[0] = StreamColorBits(num_bits);
- Q = StreamColorBits(3);
+ qQ.w = StreamColorBits(3);
m[1] = StreamColorBits(num_bits);
- Q |= StreamColorBits(2) << 3;
+ qQ.w |= StreamColorBits(2) << 3;
m[2] = StreamColorBits(num_bits);
- Q |= StreamColorBits(2) << 5;
- if (BitsOp(Q, 1, 2) == 3 && BitsOp(Q, 5, 6) == 0) {
- q[0] = 4;
- q[1] = 4;
- q[2] = (BitsBracket(Q, 0) << 2) | ((BitsBracket(Q, 4) & ~BitsBracket(Q, 0)) << 1) |
- (BitsBracket(Q, 3) & ~BitsBracket(Q, 0));
+ qQ.w |= StreamColorBits(2) << 5;
+ if (BitsOp(qQ.w, 1, 2) == 3 && BitsOp(qQ.w, 5, 6) == 0) {
+ qQ.x = 4;
+ qQ.y = 4;
+ qQ.z = (BitsBracket(qQ.w, 0) << 2) | ((BitsBracket(qQ.w, 4) & ~BitsBracket(qQ.w, 0)) << 1) |
+ (BitsBracket(qQ.w, 3) & ~BitsBracket(qQ.w, 0));
} else {
uint C = 0;
- if (BitsOp(Q, 1, 2) == 3) {
- q[2] = 4;
- C = (BitsOp(Q, 3, 4) << 3) | ((~BitsOp(Q, 5, 6) & 3) << 1) | BitsBracket(Q, 0);
+ if (BitsOp(qQ.w, 1, 2) == 3) {
+ qQ.z = 4;
+ C = (BitsOp(qQ.w, 3, 4) << 3) | ((~BitsOp(qQ.w, 5, 6) & 3) << 1) | BitsBracket(qQ.w, 0);
} else {
- q[2] = BitsOp(Q, 5, 6);
- C = BitsOp(Q, 0, 4);
+ qQ.z = BitsOp(qQ.w, 5, 6);
+ C = BitsOp(qQ.w, 0, 4);
}
if (BitsOp(C, 0, 2) == 5) {
- q[1] = 4;
- q[0] = BitsOp(C, 3, 4);
+ qQ.y = 4;
+ qQ.x = BitsOp(C, 3, 4);
} else {
- q[1] = BitsOp(C, 3, 4);
- q[0] = BitsOp(C, 0, 2);
+ qQ.y = BitsOp(C, 3, 4);
+ qQ.x = BitsOp(C, 0, 2);
}
}
for (uint i = 0; i < 3; i++) {
- EncodingData val;
- val.encoding = QUINT;
- val.num_bits = num_bits;
- val.bit_value = m[i];
- val.quint_trit_value = q[i];
+ const EncodingData val = CreateEncodingData(QUINT, num_bits, m[i], qQ[i]);
ResultEmplaceBack(val);
}
}
void DecodeTritBlock(uint num_bits) {
- uint m[5];
- uint t[5];
- uint T;
+ uvec4 m;
+ uvec4 t;
+ uvec3 Tm5t5;
m[0] = StreamColorBits(num_bits);
- T = StreamColorBits(2);
+ Tm5t5.x = StreamColorBits(2);
m[1] = StreamColorBits(num_bits);
- T |= StreamColorBits(2) << 2;
+ Tm5t5.x |= StreamColorBits(2) << 2;
m[2] = StreamColorBits(num_bits);
- T |= StreamColorBits(1) << 4;
+ Tm5t5.x |= StreamColorBits(1) << 4;
m[3] = StreamColorBits(num_bits);
- T |= StreamColorBits(2) << 5;
- m[4] = StreamColorBits(num_bits);
- T |= StreamColorBits(1) << 7;
+ Tm5t5.x |= StreamColorBits(2) << 5;
+ Tm5t5.y = StreamColorBits(num_bits);
+ Tm5t5.x |= StreamColorBits(1) << 7;
uint C = 0;
- if (BitsOp(T, 2, 4) == 7) {
- C = (BitsOp(T, 5, 7) << 2) | BitsOp(T, 0, 1);
- t[4] = 2;
+ if (BitsOp(Tm5t5.x, 2, 4) == 7) {
+ C = (BitsOp(Tm5t5.x, 5, 7) << 2) | BitsOp(Tm5t5.x, 0, 1);
+ Tm5t5.z = 2;
t[3] = 2;
} else {
- C = BitsOp(T, 0, 4);
- if (BitsOp(T, 5, 6) == 3) {
- t[4] = 2;
- t[3] = BitsBracket(T, 7);
+ C = BitsOp(Tm5t5.x, 0, 4);
+ if (BitsOp(Tm5t5.x, 5, 6) == 3) {
+ Tm5t5.z = 2;
+ t[3] = BitsBracket(Tm5t5.x, 7);
} else {
- t[4] = BitsBracket(T, 7);
- t[3] = BitsOp(T, 5, 6);
+ Tm5t5.z = BitsBracket(Tm5t5.x, 7);
+ t[3] = BitsOp(Tm5t5.x, 5, 6);
}
}
if (BitsOp(C, 0, 1) == 3) {
@@ -508,31 +425,31 @@ void DecodeTritBlock(uint num_bits) {
t[1] = BitsOp(C, 2, 3);
t[0] = (BitsBracket(C, 1) << 1) | (BitsBracket(C, 0) & ~BitsBracket(C, 1));
}
- for (uint i = 0; i < 5; i++) {
- EncodingData val;
- val.encoding = TRIT;
- val.num_bits = num_bits;
- val.bit_value = m[i];
- val.quint_trit_value = t[i];
+ for (uint i = 0; i < 4; i++) {
+ const EncodingData val = CreateEncodingData(TRIT, num_bits, m[i], t[i]);
ResultEmplaceBack(val);
}
+ const EncodingData val = CreateEncodingData(TRIT, num_bits, Tm5t5.y, Tm5t5.z);
+ ResultEmplaceBack(val);
}
void DecodeIntegerSequence(uint max_range, uint num_values) {
- EncodingData val = encoding_values[max_range];
+ EncodingData val = EncodingData(encoding_values[max_range]);
+ const uint encoding = Encoding(val);
+ const uint num_bits = NumBits(val);
uint vals_decoded = 0;
- while (vals_decoded < num_values) {
- switch (val.encoding) {
+ while (vals_decoded < num_values && !result_limit_reached) {
+ switch (encoding) {
case QUINT:
- DecodeQuintBlock(val.num_bits);
+ DecodeQuintBlock(num_bits);
vals_decoded += 3;
break;
case TRIT:
- DecodeTritBlock(val.num_bits);
+ DecodeTritBlock(num_bits);
vals_decoded += 5;
break;
case JUST_BITS:
- val.bit_value = StreamColorBits(val.num_bits);
+ BitValue(val, StreamColorBits(num_bits));
ResultEmplaceBack(val);
vals_decoded++;
break;
@@ -540,7 +457,7 @@ void DecodeIntegerSequence(uint max_range, uint num_values) {
}
}
-void DecodeColorValues(uvec4 modes, uint num_partitions, uint color_data_bits) {
+void DecodeColorValues(uvec4 modes, uint num_partitions, uint color_data_bits, out uint color_values[32]) {
uint num_values = 0;
for (uint i = 0; i < num_partitions; i++) {
num_values += ((modes[i] >> 2) + 1) << 1;
@@ -549,7 +466,7 @@ void DecodeColorValues(uvec4 modes, uint num_partitions, uint color_data_bits) {
// TODO(ameerj): profile with binary search
int range = 0;
while (++range < encoding_values.length()) {
- uint bit_length = GetBitLength(num_values, range);
+ const uint bit_length = GetBitLength(num_values, range);
if (bit_length > color_data_bits) {
break;
}
@@ -560,48 +477,49 @@ void DecodeColorValues(uvec4 modes, uint num_partitions, uint color_data_bits) {
if (out_index >= num_values) {
break;
}
- EncodingData val = result_vector[itr];
- uint bitlen = val.num_bits;
- uint bitval = val.bit_value;
+ const EncodingData val = GetEncodingFromVector(itr);
+ const uint encoding = Encoding(val);
+ const uint bitlen = NumBits(val);
+ const uint bitval = BitValue(val);
uint A = 0, B = 0, C = 0, D = 0;
A = ReplicateBitTo9((bitval & 1));
- switch (val.encoding) {
+ switch (encoding) {
case JUST_BITS:
- color_values[out_index++] = FastReplicateTo8(bitval, bitlen);
+ color_values[++out_index] = FastReplicateTo8(bitval, bitlen);
break;
case TRIT: {
- D = val.quint_trit_value;
+ D = QuintTritValue(val);
switch (bitlen) {
case 1:
C = 204;
break;
case 2: {
C = 93;
- uint b = (bitval >> 1) & 1;
+ const uint b = (bitval >> 1) & 1;
B = (b << 8) | (b << 4) | (b << 2) | (b << 1);
break;
}
case 3: {
C = 44;
- uint cb = (bitval >> 1) & 3;
+ const uint cb = (bitval >> 1) & 3;
B = (cb << 7) | (cb << 2) | cb;
break;
}
case 4: {
C = 22;
- uint dcb = (bitval >> 1) & 7;
+ const uint dcb = (bitval >> 1) & 7;
B = (dcb << 6) | dcb;
break;
}
case 5: {
C = 11;
- uint edcb = (bitval >> 1) & 0xF;
+ const uint edcb = (bitval >> 1) & 0xF;
B = (edcb << 5) | (edcb >> 2);
break;
}
case 6: {
C = 5;
- uint fedcb = (bitval >> 1) & 0x1F;
+ const uint fedcb = (bitval >> 1) & 0x1F;
B = (fedcb << 4) | (fedcb >> 4);
break;
}
@@ -609,32 +527,32 @@ void DecodeColorValues(uvec4 modes, uint num_partitions, uint color_data_bits) {
break;
}
case QUINT: {
- D = val.quint_trit_value;
+ D = QuintTritValue(val);
switch (bitlen) {
case 1:
C = 113;
break;
case 2: {
C = 54;
- uint b = (bitval >> 1) & 1;
+ const uint b = (bitval >> 1) & 1;
B = (b << 8) | (b << 3) | (b << 2);
break;
}
case 3: {
C = 26;
- uint cb = (bitval >> 1) & 3;
+ const uint cb = (bitval >> 1) & 3;
B = (cb << 7) | (cb << 1) | (cb >> 1);
break;
}
case 4: {
C = 13;
- uint dcb = (bitval >> 1) & 7;
+ const uint dcb = (bitval >> 1) & 7;
B = (dcb << 6) | (dcb >> 1);
break;
}
case 5: {
C = 6;
- uint edcb = (bitval >> 1) & 0xF;
+ const uint edcb = (bitval >> 1) & 0xF;
B = (edcb << 5) | (edcb >> 3);
break;
}
@@ -642,11 +560,11 @@ void DecodeColorValues(uvec4 modes, uint num_partitions, uint color_data_bits) {
break;
}
}
- if (val.encoding != JUST_BITS) {
+ if (encoding != JUST_BITS) {
uint T = (D * C) + B;
T ^= A;
T = (A & 0x80) | (T >> 2);
- color_values[out_index++] = T;
+ color_values[++out_index] = T;
}
}
}
@@ -664,139 +582,136 @@ ivec2 BitTransferSigned(int a, int b) {
}
uvec4 ClampByte(ivec4 color) {
- for (uint i = 0; i < 4; ++i) {
- color[i] = (color[i] < 0) ? 0 : ((color[i] > 255) ? 255 : color[i]);
- }
- return uvec4(color);
+ return uvec4(clamp(color, 0, 255));
}
ivec4 BlueContract(int a, int r, int g, int b) {
return ivec4(a, (r + b) >> 1, (g + b) >> 1, b);
}
-void ComputeEndpoints(out uvec4 ep1, out uvec4 ep2, uint color_endpoint_mode) {
+void ComputeEndpoints(out uvec4 ep1, out uvec4 ep2, uint color_endpoint_mode, uint color_values[32],
+ inout uint colvals_index) {
#define READ_UINT_VALUES(N) \
- uint v[N]; \
+ uvec4 V[2]; \
for (uint i = 0; i < N; i++) { \
- v[i] = color_values[colvals_index++]; \
+ V[i / 4][i % 4] = color_values[++colvals_index]; \
}
-
#define READ_INT_VALUES(N) \
- int v[N]; \
+ ivec4 V[2]; \
for (uint i = 0; i < N; i++) { \
- v[i] = int(color_values[colvals_index++]); \
+ V[i / 4][i % 4] = int(color_values[++colvals_index]); \
}
switch (color_endpoint_mode) {
case 0: {
READ_UINT_VALUES(2)
- ep1 = uvec4(0xFF, v[0], v[0], v[0]);
- ep2 = uvec4(0xFF, v[1], v[1], v[1]);
+ ep1 = uvec4(0xFF, V[0].x, V[0].x, V[0].x);
+ ep2 = uvec4(0xFF, V[0].y, V[0].y, V[0].y);
break;
}
case 1: {
READ_UINT_VALUES(2)
- uint L0 = (v[0] >> 2) | (v[1] & 0xC0);
- uint L1 = min(L0 + (v[1] & 0x3F), 0xFFU);
+ const uint L0 = (V[0].x >> 2) | (V[0].y & 0xC0);
+ const uint L1 = min(L0 + (V[0].y & 0x3F), 0xFFU);
ep1 = uvec4(0xFF, L0, L0, L0);
ep2 = uvec4(0xFF, L1, L1, L1);
break;
}
case 4: {
READ_UINT_VALUES(4)
- ep1 = uvec4(v[2], v[0], v[0], v[0]);
- ep2 = uvec4(v[3], v[1], v[1], v[1]);
+ ep1 = uvec4(V[0].z, V[0].x, V[0].x, V[0].x);
+ ep2 = uvec4(V[0].w, V[0].y, V[0].y, V[0].y);
break;
}
case 5: {
READ_INT_VALUES(4)
- ivec2 transferred = BitTransferSigned(v[1], v[0]);
- v[1] = transferred.x;
- v[0] = transferred.y;
- transferred = BitTransferSigned(v[3], v[2]);
- v[3] = transferred.x;
- v[2] = transferred.y;
- ep1 = ClampByte(ivec4(v[2], v[0], v[0], v[0]));
- ep2 = ClampByte(ivec4(v[2] + v[3], v[0] + v[1], v[0] + v[1], v[0] + v[1]));
+ ivec2 transferred = BitTransferSigned(V[0].y, V[0].x);
+ V[0].y = transferred.x;
+ V[0].x = transferred.y;
+ transferred = BitTransferSigned(V[0].w, V[0].z);
+ V[0].w = transferred.x;
+ V[0].z = transferred.y;
+ ep1 = ClampByte(ivec4(V[0].z, V[0].x, V[0].x, V[0].x));
+ ep2 = ClampByte(ivec4(V[0].z + V[0].w, V[0].x + V[0].y, V[0].x + V[0].y, V[0].x + V[0].y));
break;
}
case 6: {
READ_UINT_VALUES(4)
- ep1 = uvec4(0xFF, (v[0] * v[3]) >> 8, (v[1] * v[3]) >> 8, (v[2] * v[3]) >> 8);
- ep2 = uvec4(0xFF, v[0], v[1], v[2]);
+ ep1 = uvec4(0xFF, (V[0].x * V[0].w) >> 8, (V[0].y * V[0].w) >> 8, (V[0].z * V[0].w) >> 8);
+ ep2 = uvec4(0xFF, V[0].x, V[0].y, V[0].z);
break;
}
case 8: {
READ_UINT_VALUES(6)
- if ((v[1] + v[3] + v[5]) >= (v[0] + v[2] + v[4])) {
- ep1 = uvec4(0xFF, v[0], v[2], v[4]);
- ep2 = uvec4(0xFF, v[1], v[3], v[5]);
+ if ((V[0].y + V[0].w + V[1].y) >= (V[0].x + V[0].z + V[1].x)) {
+ ep1 = uvec4(0xFF, V[0].x, V[0].z, V[1].x);
+ ep2 = uvec4(0xFF, V[0].y, V[0].w, V[1].y);
} else {
- ep1 = uvec4(BlueContract(0xFF, int(v[1]), int(v[3]), int(v[5])));
- ep2 = uvec4(BlueContract(0xFF, int(v[0]), int(v[2]), int(v[4])));
+ ep1 = uvec4(BlueContract(0xFF, int(V[0].y), int(V[0].w), int(V[1].y)));
+ ep2 = uvec4(BlueContract(0xFF, int(V[0].x), int(V[0].z), int(V[1].x)));
}
break;
}
case 9: {
READ_INT_VALUES(6)
- ivec2 transferred = BitTransferSigned(v[1], v[0]);
- v[1] = transferred.x;
- v[0] = transferred.y;
- transferred = BitTransferSigned(v[3], v[2]);
- v[3] = transferred.x;
- v[2] = transferred.y;
- transferred = BitTransferSigned(v[5], v[4]);
- v[5] = transferred.x;
- v[4] = transferred.y;
- if ((v[1] + v[3] + v[5]) >= 0) {
- ep1 = ClampByte(ivec4(0xFF, v[0], v[2], v[4]));
- ep2 = ClampByte(ivec4(0xFF, v[0] + v[1], v[2] + v[3], v[4] + v[5]));
+ ivec2 transferred = BitTransferSigned(V[0].y, V[0].x);
+ V[0].y = transferred.x;
+ V[0].x = transferred.y;
+ transferred = BitTransferSigned(V[0].w, V[0].z);
+ V[0].w = transferred.x;
+ V[0].z = transferred.y;
+ transferred = BitTransferSigned(V[1].y, V[1].x);
+ V[1].y = transferred.x;
+ V[1].x = transferred.y;
+ if ((V[0].y + V[0].w + V[1].y) >= 0) {
+ ep1 = ClampByte(ivec4(0xFF, V[0].x, V[0].z, V[1].x));
+ ep2 = ClampByte(ivec4(0xFF, V[0].x + V[0].y, V[0].z + V[0].w, V[1].x + V[1].y));
} else {
- ep1 = ClampByte(BlueContract(0xFF, v[0] + v[1], v[2] + v[3], v[4] + v[5]));
- ep2 = ClampByte(BlueContract(0xFF, v[0], v[2], v[4]));
+ ep1 = ClampByte(BlueContract(0xFF, V[0].x + V[0].y, V[0].z + V[0].w, V[1].x + V[1].y));
+ ep2 = ClampByte(BlueContract(0xFF, V[0].x, V[0].z, V[1].x));
}
break;
}
case 10: {
READ_UINT_VALUES(6)
- ep1 = uvec4(v[4], (v[0] * v[3]) >> 8, (v[1] * v[3]) >> 8, (v[2] * v[3]) >> 8);
- ep2 = uvec4(v[5], v[0], v[1], v[2]);
+ ep1 = uvec4(V[1].x, (V[0].x * V[0].w) >> 8, (V[0].y * V[0].w) >> 8, (V[0].z * V[0].w) >> 8);
+ ep2 = uvec4(V[1].y, V[0].x, V[0].y, V[0].z);
break;
}
case 12: {
READ_UINT_VALUES(8)
- if ((v[1] + v[3] + v[5]) >= (v[0] + v[2] + v[4])) {
- ep1 = uvec4(v[6], v[0], v[2], v[4]);
- ep2 = uvec4(v[7], v[1], v[3], v[5]);
+ if ((V[0].y + V[0].w + V[1].y) >= (V[0].x + V[0].z + V[1].x)) {
+ ep1 = uvec4(V[1].z, V[0].x, V[0].z, V[1].x);
+ ep2 = uvec4(V[1].w, V[0].y, V[0].w, V[1].y);
} else {
- ep1 = uvec4(BlueContract(int(v[7]), int(v[1]), int(v[3]), int(v[5])));
- ep2 = uvec4(BlueContract(int(v[6]), int(v[0]), int(v[2]), int(v[4])));
+ ep1 = uvec4(BlueContract(int(V[1].w), int(V[0].y), int(V[0].w), int(V[1].y)));
+ ep2 = uvec4(BlueContract(int(V[1].z), int(V[0].x), int(V[0].z), int(V[1].x)));
}
break;
}
case 13: {
READ_INT_VALUES(8)
- ivec2 transferred = BitTransferSigned(v[1], v[0]);
- v[1] = transferred.x;
- v[0] = transferred.y;
- transferred = BitTransferSigned(v[3], v[2]);
- v[3] = transferred.x;
- v[2] = transferred.y;
-
- transferred = BitTransferSigned(v[5], v[4]);
- v[5] = transferred.x;
- v[4] = transferred.y;
-
- transferred = BitTransferSigned(v[7], v[6]);
- v[7] = transferred.x;
- v[6] = transferred.y;
-
- if ((v[1] + v[3] + v[5]) >= 0) {
- ep1 = ClampByte(ivec4(v[6], v[0], v[2], v[4]));
- ep2 = ClampByte(ivec4(v[7] + v[6], v[0] + v[1], v[2] + v[3], v[4] + v[5]));
+ ivec2 transferred = BitTransferSigned(V[0].y, V[0].x);
+ V[0].y = transferred.x;
+ V[0].x = transferred.y;
+ transferred = BitTransferSigned(V[0].w, V[0].z);
+ V[0].w = transferred.x;
+ V[0].z = transferred.y;
+
+ transferred = BitTransferSigned(V[1].y, V[1].x);
+ V[1].y = transferred.x;
+ V[1].x = transferred.y;
+
+ transferred = BitTransferSigned(V[1].w, V[1].z);
+ V[1].w = transferred.x;
+ V[1].z = transferred.y;
+
+ if ((V[0].y + V[0].w + V[1].y) >= 0) {
+ ep1 = ClampByte(ivec4(V[1].z, V[0].x, V[0].z, V[1].x));
+ ep2 = ClampByte(ivec4(V[1].w + V[1].z, V[0].x + V[0].y, V[0].z + V[0].w, V[1].x + V[1].y));
} else {
- ep1 = ClampByte(BlueContract(v[6] + v[7], v[0] + v[1], v[2] + v[3], v[4] + v[5]));
- ep2 = ClampByte(BlueContract(v[6], v[0], v[2], v[4]));
+ ep1 = ClampByte(BlueContract(V[1].z + V[1].w, V[0].x + V[0].y, V[0].z + V[0].w, V[1].x + V[1].y));
+ ep2 = ClampByte(BlueContract(V[1].z, V[0].x, V[0].z, V[1].x));
}
break;
}
@@ -812,36 +727,34 @@ void ComputeEndpoints(out uvec4 ep1, out uvec4 ep2, uint color_endpoint_mode) {
}
uint UnquantizeTexelWeight(EncodingData val) {
- uint bitval = val.bit_value;
- uint bitlen = val.num_bits;
- uint A = ReplicateBitTo7((bitval & 1));
+ const uint encoding = Encoding(val);
+ const uint bitlen = NumBits(val);
+ const uint bitval = BitValue(val);
+ const uint A = ReplicateBitTo7((bitval & 1));
uint B = 0, C = 0, D = 0;
uint result = 0;
- switch (val.encoding) {
+ const uint bitlen_0_results[5] = {0, 16, 32, 48, 64};
+ switch (encoding) {
case JUST_BITS:
- result = FastReplicateTo6(bitval, bitlen);
- break;
+ return FastReplicateTo6(bitval, bitlen);
case TRIT: {
- D = val.quint_trit_value;
+ D = QuintTritValue(val);
switch (bitlen) {
- case 0: {
- uint results[3] = {0, 32, 63};
- result = results[D];
- break;
- }
+ case 0:
+ return bitlen_0_results[D * 2];
case 1: {
C = 50;
break;
}
case 2: {
C = 23;
- uint b = (bitval >> 1) & 1;
+ const uint b = (bitval >> 1) & 1;
B = (b << 6) | (b << 2) | b;
break;
}
case 3: {
C = 11;
- uint cb = (bitval >> 1) & 3;
+ const uint cb = (bitval >> 1) & 3;
B = (cb << 5) | cb;
break;
}
@@ -851,20 +764,17 @@ uint UnquantizeTexelWeight(EncodingData val) {
break;
}
case QUINT: {
- D = val.quint_trit_value;
+ D = QuintTritValue(val);
switch (bitlen) {
- case 0: {
- uint results[5] = {0, 16, 32, 47, 63};
- result = results[D];
- break;
- }
+ case 0:
+ return bitlen_0_results[D];
case 1: {
C = 28;
break;
}
case 2: {
C = 13;
- uint b = (bitval >> 1) & 1;
+ const uint b = (bitval >> 1) & 1;
B = (b << 6) | (b << 1);
break;
}
@@ -872,7 +782,7 @@ uint UnquantizeTexelWeight(EncodingData val) {
break;
}
}
- if (val.encoding != JUST_BITS && bitlen > 0) {
+ if (encoding != JUST_BITS && bitlen > 0) {
result = D * C + B;
result ^= A;
result = (A & 0x20) | (result >> 2);
@@ -883,61 +793,77 @@ uint UnquantizeTexelWeight(EncodingData val) {
return result;
}
-void UnquantizeTexelWeights(bool dual_plane, uvec2 size) {
- uint weight_idx = 0;
- uint unquantized[2][144];
- uint area = size.x * size.y;
- for (uint itr = 0; itr < texel_vector_index; itr++) {
- unquantized[0][weight_idx] = UnquantizeTexelWeight(texel_vector[itr]);
- if (dual_plane) {
- ++itr;
- unquantized[1][weight_idx] = UnquantizeTexelWeight(texel_vector[itr]);
- if (itr == texel_vector_index) {
- break;
- }
- }
- if (++weight_idx >= (area))
- break;
+void UnquantizeTexelWeights(uvec2 size, bool is_dual_plane) {
+ const uint num_planes = is_dual_plane ? 2 : 1;
+ const uint area = size.x * size.y;
+ const uint loop_count = min(result_index, area * num_planes);
+ for (uint itr = 0; itr < loop_count; ++itr) {
+ result_vector[itr] =
+ UnquantizeTexelWeight(GetEncodingFromVector(itr));
}
+}
+
+uint GetUnquantizedTexelWieght(uint offset_base, uint plane, bool is_dual_plane) {
+ const uint offset = is_dual_plane ? 2 * offset_base + plane : offset_base;
+ return result_vector[offset];
+}
+uvec4 GetUnquantizedWeightVector(uint t, uint s, uvec2 size, uint plane_index, bool is_dual_plane) {
const uint Ds = uint((block_dims.x * 0.5f + 1024) / (block_dims.x - 1));
const uint Dt = uint((block_dims.y * 0.5f + 1024) / (block_dims.y - 1));
- const uint k_plane_scale = dual_plane ? 2 : 1;
- for (uint plane = 0; plane < k_plane_scale; plane++) {
- for (uint t = 0; t < block_dims.y; t++) {
- for (uint s = 0; s < block_dims.x; s++) {
- uint cs = Ds * s;
- uint ct = Dt * t;
- uint gs = (cs * (size.x - 1) + 32) >> 6;
- uint gt = (ct * (size.y - 1) + 32) >> 6;
- uint js = gs >> 4;
- uint fs = gs & 0xF;
- uint jt = gt >> 4;
- uint ft = gt & 0x0F;
- uint w11 = (fs * ft + 8) >> 4;
- uint w10 = ft - w11;
- uint w01 = fs - w11;
- uint w00 = 16 - fs - ft + w11;
- uvec4 w = uvec4(w00, w01, w10, w11);
- uint v0 = jt * size.x + js;
-
- uvec4 p = uvec4(0);
- if (v0 < area) {
- p.x = unquantized[plane][v0];
- }
- if ((v0 + 1) < (area)) {
- p.y = unquantized[plane][v0 + 1];
- }
- if ((v0 + size.x) < (area)) {
- p.z = unquantized[plane][(v0 + size.x)];
- }
- if ((v0 + size.x + 1) < (area)) {
- p.w = unquantized[plane][(v0 + size.x + 1)];
- }
- unquantized_texel_weights[plane][t * block_dims.x + s] = (uint(dot(p, w)) + 8) >> 4;
- }
+ const uint area = size.x * size.y;
+
+ const uint cs = Ds * s;
+ const uint ct = Dt * t;
+ const uint gs = (cs * (size.x - 1) + 32) >> 6;
+ const uint gt = (ct * (size.y - 1) + 32) >> 6;
+ const uint js = gs >> 4;
+ const uint fs = gs & 0xF;
+ const uint jt = gt >> 4;
+ const uint ft = gt & 0x0F;
+ const uint w11 = (fs * ft + 8) >> 4;
+ const uint w10 = ft - w11;
+ const uint w01 = fs - w11;
+ const uint w00 = 16 - fs - ft + w11;
+ const uvec4 w = uvec4(w00, w01, w10, w11);
+ const uint v0 = jt * size.x + js;
+
+ uvec4 p0 = uvec4(0);
+ uvec4 p1 = uvec4(0);
+
+ if (v0 < area) {
+ const uint offset_base = v0;
+ p0.x = GetUnquantizedTexelWieght(offset_base, 0, is_dual_plane);
+ p1.x = GetUnquantizedTexelWieght(offset_base, 1, is_dual_plane);
+ }
+ if ((v0 + 1) < (area)) {
+ const uint offset_base = v0 + 1;
+ p0.y = GetUnquantizedTexelWieght(offset_base, 0, is_dual_plane);
+ p1.y = GetUnquantizedTexelWieght(offset_base, 1, is_dual_plane);
+ }
+ if ((v0 + size.x) < (area)) {
+ const uint offset_base = v0 + size.x;
+ p0.z = GetUnquantizedTexelWieght(offset_base, 0, is_dual_plane);
+ p1.z = GetUnquantizedTexelWieght(offset_base, 1, is_dual_plane);
+ }
+ if ((v0 + size.x + 1) < (area)) {
+ const uint offset_base = v0 + size.x + 1;
+ p0.w = GetUnquantizedTexelWieght(offset_base, 0, is_dual_plane);
+ p1.w = GetUnquantizedTexelWieght(offset_base, 1, is_dual_plane);
+ }
+
+ const uint primary_weight = (uint(dot(p0, w)) + 8) >> 4;
+
+ uvec4 weight_vec = uvec4(primary_weight);
+
+ if (is_dual_plane) {
+ const uint secondary_weight = (uint(dot(p1, w)) + 8) >> 4;
+ for (uint c = 0; c < 4; c++) {
+ const bool is_secondary = ((plane_index + 1u) & 3u) == c;
+ weight_vec[c] = is_secondary ? secondary_weight : primary_weight;
}
}
+ return weight_vec;
}
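GetUnquantizedWeightVector() above performs the ASTC weight-grid infill: the stored grid (size.x by size.y) is never larger than the block footprint, so each texel blends the four surrounding grid weights with 4-bit fixed-point bilinear factors that always sum to 16, then rounds via (dot(p, w) + 8) >> 4. A small sketch of just the factor computation:

    #include <cstdint>

    struct Weights {
        std::uint32_t w00, w01, w10, w11;
    };

    // fs and ft are the 4-bit fractional positions inside the weight grid.
    constexpr Weights BilinearWeights(std::uint32_t fs, std::uint32_t ft) {
        const std::uint32_t w11 = (fs * ft + 8) >> 4;
        return {16 - fs - ft + w11, fs - w11, ft - w11, w11};
    }

    constexpr std::uint32_t Sum(Weights w) {
        return w.w00 + w.w01 + w.w10 + w.w11;
    }

    static_assert(BilinearWeights(5, 9).w11 == 3);
    static_assert(Sum(BilinearWeights(5, 9)) == 16); // the factors always total 16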
int FindLayout(uint mode) {
@@ -971,80 +897,96 @@ int FindLayout(uint mode) {
return 5;
}
-TexelWeightParams DecodeBlockInfo() {
- TexelWeightParams params = TexelWeightParams(uvec2(0), 0, false, false, false, false);
- uint mode = StreamBits(11);
+
+void FillError(ivec3 coord) {
+ for (uint j = 0; j < block_dims.y; j++) {
+ for (uint i = 0; i < block_dims.x; i++) {
+ imageStore(dest_image, coord + ivec3(i, j, 0), vec4(0.0, 0.0, 0.0, 0.0));
+ }
+ }
+}
+
+void FillVoidExtentLDR(ivec3 coord) {
+ SkipBits(52);
+ const uint r_u = StreamBits(16);
+ const uint g_u = StreamBits(16);
+ const uint b_u = StreamBits(16);
+ const uint a_u = StreamBits(16);
+ const float a = float(a_u) / 65535.0f;
+ const float r = float(r_u) / 65535.0f;
+ const float g = float(g_u) / 65535.0f;
+ const float b = float(b_u) / 65535.0f;
+ for (uint j = 0; j < block_dims.y; j++) {
+ for (uint i = 0; i < block_dims.x; i++) {
+ imageStore(dest_image, coord + ivec3(i, j, 0), vec4(r, g, b, a));
+ }
+ }
+}
+
+bool IsError(uint mode) {
if ((mode & 0x1ff) == 0x1fc) {
if ((mode & 0x200) != 0) {
- params.void_extent_hdr = true;
- } else {
- params.void_extent_ldr = true;
+ // params.void_extent_hdr = true;
+ return true;
}
if ((mode & 0x400) == 0 || StreamBits(1) == 0) {
- params.error_state = true;
+ return true;
}
- return params;
+ return false;
}
if ((mode & 0xf) == 0) {
- params.error_state = true;
- return params;
+ return true;
}
if ((mode & 3) == 0 && (mode & 0x1c0) == 0x1c0) {
- params.error_state = true;
- return params;
+ return true;
}
+ return false;
+}
+
+uvec2 DecodeBlockSize(uint mode) {
uint A, B;
- uint mode_layout = FindLayout(mode);
- switch (mode_layout) {
+ switch (FindLayout(mode)) {
case 0:
A = (mode >> 5) & 0x3;
B = (mode >> 7) & 0x3;
- params.size = uvec2(B + 4, A + 2);
- break;
+ return uvec2(B + 4, A + 2);
case 1:
A = (mode >> 5) & 0x3;
B = (mode >> 7) & 0x3;
- params.size = uvec2(B + 8, A + 2);
- break;
+ return uvec2(B + 8, A + 2);
case 2:
A = (mode >> 5) & 0x3;
B = (mode >> 7) & 0x3;
- params.size = uvec2(A + 2, B + 8);
- break;
+ return uvec2(A + 2, B + 8);
case 3:
A = (mode >> 5) & 0x3;
B = (mode >> 7) & 0x1;
- params.size = uvec2(A + 2, B + 6);
- break;
+ return uvec2(A + 2, B + 6);
case 4:
A = (mode >> 5) & 0x3;
B = (mode >> 7) & 0x1;
- params.size = uvec2(B + 2, A + 2);
- break;
+ return uvec2(B + 2, A + 2);
case 5:
A = (mode >> 5) & 0x3;
- params.size = uvec2(12, A + 2);
- break;
+ return uvec2(12, A + 2);
case 6:
A = (mode >> 5) & 0x3;
- params.size = uvec2(A + 2, 12);
- break;
+ return uvec2(A + 2, 12);
case 7:
- params.size = uvec2(6, 10);
- break;
+ return uvec2(6, 10);
case 8:
- params.size = uvec2(10, 6);
- break;
+ return uvec2(10, 6);
case 9:
A = (mode >> 5) & 0x3;
B = (mode >> 9) & 0x3;
- params.size = uvec2(A + 6, B + 6);
- break;
+ return uvec2(A + 6, B + 6);
default:
- params.error_state = true;
- break;
+ return uvec2(0);
}
- params.dual_plane = (mode_layout != 9) && ((mode & 0x400) != 0);
+}
+
+uint DecodeMaxWeight(uint mode) {
+ const uint mode_layout = FindLayout(mode);
uint weight_index = (mode & 0x10) != 0 ? 1 : 0;
if (mode_layout < 5) {
weight_index |= (mode & 0x3) << 1;
@@ -1053,64 +995,34 @@ TexelWeightParams DecodeBlockInfo() {
}
weight_index -= 2;
if ((mode_layout != 9) && ((mode & 0x200) != 0)) {
- const int max_weights[6] = int[6](7, 8, 9, 10, 11, 12);
- params.max_weight = max_weights[weight_index];
- } else {
- const int max_weights[6] = int[6](1, 2, 3, 4, 5, 6);
- params.max_weight = max_weights[weight_index];
- }
- return params;
-}
-
-void FillError(ivec3 coord) {
- for (uint j = 0; j < block_dims.y; j++) {
- for (uint i = 0; i < block_dims.x; i++) {
- imageStore(dest_image, coord + ivec3(i, j, 0), vec4(0.0, 0.0, 0.0, 0.0));
- }
- }
-}
-
-void FillVoidExtentLDR(ivec3 coord) {
- StreamBits(52);
- uint r_u = StreamBits(16);
- uint g_u = StreamBits(16);
- uint b_u = StreamBits(16);
- uint a_u = StreamBits(16);
- float a = float(a_u) / 65535.0f;
- float r = float(r_u) / 65535.0f;
- float g = float(g_u) / 65535.0f;
- float b = float(b_u) / 65535.0f;
- for (uint j = 0; j < block_dims.y; j++) {
- for (uint i = 0; i < block_dims.x; i++) {
- imageStore(dest_image, coord + ivec3(i, j, 0), vec4(r, g, b, a));
- }
+ weight_index += 6;
}
+ return weight_index + 1;
}
void DecompressBlock(ivec3 coord) {
- TexelWeightParams params = DecodeBlockInfo();
- if (params.error_state) {
- FillError(coord);
- return;
- }
- if (params.void_extent_hdr) {
+ uint mode = StreamBits(11);
+ if (IsError(mode)) {
FillError(coord);
return;
}
- if (params.void_extent_ldr) {
+ if ((mode & 0x1ff) == 0x1fc) {
+ // params.void_extent_ldr = true;
FillVoidExtentLDR(coord);
return;
}
- if ((params.size.x > block_dims.x) || (params.size.y > block_dims.y)) {
+ const uvec2 size_params = DecodeBlockSize(mode);
+ if ((size_params.x > block_dims.x) || (size_params.y > block_dims.y)) {
FillError(coord);
return;
}
- uint num_partitions = StreamBits(2) + 1;
- if (num_partitions > 4 || (num_partitions == 4 && params.dual_plane)) {
+ const uint num_partitions = StreamBits(2) + 1;
+ const uint mode_layout = FindLayout(mode);
+ const bool dual_plane = (mode_layout != 9) && ((mode & 0x400) != 0);
+ if (num_partitions > 4 || (num_partitions == 4 && dual_plane)) {
FillError(coord);
return;
}
- int plane_index = -1;
uint partition_index = 1;
uvec4 color_endpoint_mode = uvec4(0);
uint ced_pointer = 0;
@@ -1122,8 +1034,9 @@ void DecompressBlock(ivec3 coord) {
partition_index = StreamBits(10);
base_cem = StreamBits(6);
}
- uint base_mode = base_cem & 3;
- uint weight_bits = GetPackedBitSize(params.size, params.dual_plane, params.max_weight);
+ const uint base_mode = base_cem & 3;
+ const uint max_weight = DecodeMaxWeight(mode);
+ const uint weight_bits = GetPackedBitSize(size_params, dual_plane, max_weight);
uint remaining_bits = 128 - weight_bits - total_bitsread;
uint extra_cem_bits = 0;
if (base_mode > 0) {
@@ -1142,10 +1055,7 @@ void DecompressBlock(ivec3 coord) {
}
}
remaining_bits -= extra_cem_bits;
- uint plane_selector_bits = 0;
- if (params.dual_plane) {
- plane_selector_bits = 2;
- }
+ const uint plane_selector_bits = dual_plane ? 2 : 0;
remaining_bits -= plane_selector_bits;
if (remaining_bits > 128) {
// Bad data, more remaining bits than 4 bytes
@@ -1153,17 +1063,17 @@ void DecompressBlock(ivec3 coord) {
return;
}
// Read color data...
- uint color_data_bits = remaining_bits;
+ const uint color_data_bits = remaining_bits;
while (remaining_bits > 0) {
- int nb = int(min(remaining_bits, 32U));
- uint b = StreamBits(nb);
+ const int nb = int(min(remaining_bits, 32U));
+ const uint b = StreamBits(nb);
color_endpoint_data[ced_pointer] = uint(bitfieldExtract(b, 0, nb));
++ced_pointer;
remaining_bits -= nb;
}
- plane_index = int(StreamBits(plane_selector_bits));
+ const uint plane_index = uint(StreamBits(plane_selector_bits));
if (base_mode > 0) {
- uint extra_cem = StreamBits(extra_cem_bits);
+ const uint extra_cem = StreamBits(extra_cem_bits);
uint cem = (extra_cem << 6) | base_cem;
cem >>= 2;
uvec4 C = uvec4(0);
@@ -1185,70 +1095,80 @@ void DecompressBlock(ivec3 coord) {
color_endpoint_mode[i] |= M[i];
}
} else if (num_partitions > 1) {
- uint cem = base_cem >> 2;
+ const uint cem = base_cem >> 2;
for (uint i = 0; i < num_partitions; i++) {
color_endpoint_mode[i] = cem;
}
}
- DecodeColorValues(color_endpoint_mode, num_partitions, color_data_bits);
- uvec4 endpoints[4][2];
- for (uint i = 0; i < num_partitions; i++) {
- ComputeEndpoints(endpoints[i][0], endpoints[i][1], color_endpoint_mode[i]);
+ uvec4 endpoints0[4];
+ uvec4 endpoints1[4];
+ {
+ // This decode phase should at most push 32 elements into the vector
+ result_vector_max_index = 32;
+ uint color_values[32];
+ uint colvals_index = 0;
+ DecodeColorValues(color_endpoint_mode, num_partitions, color_data_bits, color_values);
+ for (uint i = 0; i < num_partitions; i++) {
+ ComputeEndpoints(endpoints0[i], endpoints1[i], color_endpoint_mode[i], color_values,
+ colvals_index);
+ }
}
+ color_endpoint_data = local_buff;
+ color_endpoint_data = bitfieldReverse(color_endpoint_data).wzyx;
+ const uint clear_byte_start = (weight_bits >> 3) + 1;
- texel_weight_data = local_buff;
- texel_weight_data = bitfieldReverse(texel_weight_data).wzyx;
- uint clear_byte_start =
- (GetPackedBitSize(params.size, params.dual_plane, params.max_weight) >> 3) + 1;
-
- uint byte_insert = ExtractBits(texel_weight_data, int(clear_byte_start - 1) * 8, 8) &
- uint(
- ((1 << (GetPackedBitSize(params.size, params.dual_plane, params.max_weight) % 8)) - 1));
- uint vec_index = (clear_byte_start - 1) >> 2;
- texel_weight_data[vec_index] =
- bitfieldInsert(texel_weight_data[vec_index], byte_insert, int((clear_byte_start - 1) % 4) * 8, 8);
+ const uint byte_insert = ExtractBits(color_endpoint_data, int(clear_byte_start - 1) * 8, 8) &
+ uint(((1 << (weight_bits % 8)) - 1));
+ const uint vec_index = (clear_byte_start - 1) >> 2;
+ color_endpoint_data[vec_index] = bitfieldInsert(color_endpoint_data[vec_index], byte_insert,
+ int((clear_byte_start - 1) % 4) * 8, 8);
for (uint i = clear_byte_start; i < 16; ++i) {
- uint idx = i >> 2;
- texel_weight_data[idx] = bitfieldInsert(texel_weight_data[idx], 0, int(i % 4) * 8, 8);
+ const uint idx = i >> 2;
+ color_endpoint_data[idx] = bitfieldInsert(color_endpoint_data[idx], 0, int(i % 4) * 8, 8);
}
- texel_flag = true; // use texel "vector" and bit stream in integer decoding
- DecodeIntegerSequence(params.max_weight, GetNumWeightValues(params.size, params.dual_plane));
- UnquantizeTexelWeights(params.dual_plane, params.size);
+ // Re-init vector variables for next decode phase
+ result_index = 0;
+ color_bitsread = 0;
+ result_limit_reached = false;
+ // The limit for the Unquantize phase; it avoids decoding more data than needed.
+ result_vector_max_index = size_params.x * size_params.y;
+ if (dual_plane) {
+ result_vector_max_index *= 2;
+ }
+ DecodeIntegerSequence(max_weight, GetNumWeightValues(size_params, dual_plane));
+
+ UnquantizeTexelWeights(size_params, dual_plane);
for (uint j = 0; j < block_dims.y; j++) {
for (uint i = 0; i < block_dims.x; i++) {
uint local_partition = 0;
if (num_partitions > 1) {
- local_partition = Select2DPartition(partition_index, i, j, num_partitions,
- (block_dims.y * block_dims.x) < 32);
- }
- vec4 p;
- uvec4 C0 = ReplicateByteTo16(endpoints[local_partition][0]);
- uvec4 C1 = ReplicateByteTo16(endpoints[local_partition][1]);
- uvec4 plane_vec = uvec4(0);
- uvec4 weight_vec = uvec4(0);
- for (uint c = 0; c < 4; c++) {
- if (params.dual_plane && (((plane_index + 1) & 3) == c)) {
- plane_vec[c] = 1;
- }
- weight_vec[c] = unquantized_texel_weights[plane_vec[c]][j * block_dims.x + i];
+ local_partition = Select2DPartition(partition_index, i, j, num_partitions);
}
- vec4 Cf = vec4((C0 * (uvec4(64) - weight_vec) + C1 * weight_vec + uvec4(32)) / 64);
- p = (Cf / 65535.0);
+ const uvec4 C0 = ReplicateByteTo16(endpoints0[local_partition]);
+ const uvec4 C1 = ReplicateByteTo16(endpoints1[local_partition]);
+ const uvec4 weight_vec = GetUnquantizedWeightVector(j, i, size_params, plane_index, dual_plane);
+ const vec4 Cf =
+ vec4((C0 * (uvec4(64) - weight_vec) + C1 * weight_vec + uvec4(32)) / 64);
+ const vec4 p = (Cf / 65535.0f);
imageStore(dest_image, coord + ivec3(i, j, 0), p.gbar);
}
}
}
+uint SwizzleOffset(uvec2 pos) {
+ const uint x = pos.x;
+ const uint y = pos.y;
+ return ((x % 64) / 32) * 256 + ((y % 8) / 2) * 64 +
+ ((x % 32) / 16) * 32 + (y % 2) * 16 + (x % 16);
+}
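SwizzleOffset() is only moved here, not changed: it maps an (x, y) position, with x already in bytes because the caller shifts pos.x by BYTES_PER_BLOCK_LOG2, to its byte offset inside a 64-byte-wide, 8-row (512-byte) GOB of Tegra block-linear memory; main() later divides the byte offset by 16 to index the uvec4 input buffer. A quick arithmetic check of one coordinate (sketch only):

    #include <cstdint>

    constexpr std::uint32_t SwizzleOffset(std::uint32_t x, std::uint32_t y) {
        return ((x % 64) / 32) * 256 + ((y % 8) / 2) * 64 +
               ((x % 32) / 16) * 32 + (y % 2) * 16 + (x % 16);
    }

    // x = 20 bytes, y = 3: 0 + 64 + 32 + 16 + 4 = 116 bytes into the 512-byte GOB.
    static_assert(SwizzleOffset(20, 3) == 116);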
+
void main() {
uvec3 pos = gl_GlobalInvocationID;
pos.x <<= BYTES_PER_BLOCK_LOG2;
-
- // Read as soon as possible due to its latency
const uint swizzle = SwizzleOffset(pos.xy);
-
const uint block_y = pos.y >> GOB_SIZE_Y_SHIFT;
uint offset = 0;
@@ -1262,8 +1182,6 @@ void main() {
if (any(greaterThanEqual(coord, imageSize(dest_image)))) {
return;
}
- current_index = 0;
- bitsread = 0;
local_buff = astc_data[offset / 16];
DecompressBlock(coord);
}
diff --git a/src/video_core/host_shaders/vulkan_depthstencil_clear.frag b/src/video_core/host_shaders/vulkan_depthstencil_clear.frag
new file mode 100644
index 000000000..1ac177c7e
--- /dev/null
+++ b/src/video_core/host_shaders/vulkan_depthstencil_clear.frag
@@ -0,0 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#version 460 core
+
+layout (push_constant) uniform PushConstants {
+ vec4 clear_depth;
+};
+
+void main() {
+ gl_FragDepth = clear_depth.x;
+}
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index aadd6967c..1ba31be88 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -1335,7 +1335,8 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
}
const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
- const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing;
+ const auto post_op = IS_IMAGE_UPLOAD ? VideoCommon::ObtainBufferOperation::DoNothing
+ : VideoCommon::ObtainBufferOperation::MarkAsWritten;
const auto [buffer, offset] =
buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);
@@ -1344,8 +1345,12 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
const std::span copy_span{&copy, 1};
if constexpr (IS_IMAGE_UPLOAD) {
+ texture_cache.PrepareImage(image_id, true, false);
image->UploadMemory(buffer->Handle(), offset, copy_span);
} else {
+ if (offset % BytesPerBlock(image->info.format)) {
+ return false;
+ }
texture_cache.DownloadImageIntoBuffer(image, buffer->Handle(), offset, copy_span,
buffer_operand.address, buffer_size);
}
diff --git a/src/video_core/renderer_opengl/util_shaders.cpp b/src/video_core/renderer_opengl/util_shaders.cpp
index 544982d18..c437013e6 100644
--- a/src/video_core/renderer_opengl/util_shaders.cpp
+++ b/src/video_core/renderer_opengl/util_shaders.cpp
@@ -68,6 +68,7 @@ void UtilShaders::ASTCDecode(Image& image, const StagingBufferMap& map,
std::span<const VideoCommon::SwizzleParameters> swizzles) {
static constexpr GLuint BINDING_INPUT_BUFFER = 0;
static constexpr GLuint BINDING_OUTPUT_IMAGE = 0;
+ program_manager.LocalMemoryWarmup();
const Extent2D tile_size{
.width = VideoCore::Surface::DefaultBlockWidth(image.info.format),
diff --git a/src/video_core/renderer_vulkan/blit_image.cpp b/src/video_core/renderer_vulkan/blit_image.cpp
index f74ae972e..1032c9d12 100644
--- a/src/video_core/renderer_vulkan/blit_image.cpp
+++ b/src/video_core/renderer_vulkan/blit_image.cpp
@@ -16,6 +16,7 @@
#include "video_core/host_shaders/vulkan_blit_depth_stencil_frag_spv.h"
#include "video_core/host_shaders/vulkan_color_clear_frag_spv.h"
#include "video_core/host_shaders/vulkan_color_clear_vert_spv.h"
+#include "video_core/host_shaders/vulkan_depthstencil_clear_frag_spv.h"
#include "video_core/renderer_vulkan/blit_image.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
@@ -428,6 +429,7 @@ BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_,
blit_depth_stencil_frag(BuildShader(device, VULKAN_BLIT_DEPTH_STENCIL_FRAG_SPV)),
clear_color_vert(BuildShader(device, VULKAN_COLOR_CLEAR_VERT_SPV)),
clear_color_frag(BuildShader(device, VULKAN_COLOR_CLEAR_FRAG_SPV)),
+ clear_stencil_frag(BuildShader(device, VULKAN_DEPTHSTENCIL_CLEAR_FRAG_SPV)),
convert_depth_to_float_frag(BuildShader(device, CONVERT_DEPTH_TO_FLOAT_FRAG_SPV)),
convert_float_to_depth_frag(BuildShader(device, CONVERT_FLOAT_TO_DEPTH_FRAG_SPV)),
convert_abgr8_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)),
@@ -593,6 +595,28 @@ void BlitImageHelper::ClearColor(const Framebuffer* dst_framebuffer, u8 color_ma
scheduler.InvalidateState();
}
+void BlitImageHelper::ClearDepthStencil(const Framebuffer* dst_framebuffer, bool depth_clear,
+ f32 clear_depth, u8 stencil_mask, u32 stencil_ref,
+ u32 stencil_compare_mask, const Region2D& dst_region) {
+ const BlitDepthStencilPipelineKey key{
+ .renderpass = dst_framebuffer->RenderPass(),
+ .depth_clear = depth_clear,
+ .stencil_mask = stencil_mask,
+ .stencil_compare_mask = stencil_compare_mask,
+ .stencil_ref = stencil_ref,
+ };
+ const VkPipeline pipeline = FindOrEmplaceClearStencilPipeline(key);
+ const VkPipelineLayout layout = *clear_color_pipeline_layout;
+ scheduler.RequestRenderpass(dst_framebuffer);
+ scheduler.Record([pipeline, layout, clear_depth, dst_region](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+ BindBlitState(cmdbuf, dst_region);
+ cmdbuf.PushConstants(layout, VK_SHADER_STAGE_FRAGMENT_BIT, clear_depth);
+ cmdbuf.Draw(3, 1, 0, 0);
+ });
+ scheduler.InvalidateState();
+}
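ClearDepthStencil() above clears by drawing rather than via vkCmdClearAttachments: the shared clear_color_vert stage covers the render area (judging by the three-vertex draw, a full-screen triangle), the new vulkan_depthstencil_clear.frag writes the pushed depth, and the stencil state built in FindOrEmplaceClearStencilPipeline further down (compare ALWAYS, pass REPLACE, writeMask = stencil_mask) handles the stencil side, with depth writes gated by depthWriteEnable = key.depth_clear. A hedged sketch of the equivalent recording in raw Vulkan, since yuzu's own vk:: command-buffer wrappers are not shown here:

    #include <vulkan/vulkan.h>

    void RecordDepthStencilClear(VkCommandBuffer cmd, VkPipeline pipeline,
                                 VkPipelineLayout layout, float clear_depth) {
        vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
        // The fragment shader only reads the .x component of its push-constant block.
        vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0,
                           sizeof(clear_depth), &clear_depth);
        vkCmdDraw(cmd, 3, 1, 0, 0); // one triangle covering the clear region
    }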
+
void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_framebuffer,
const ImageView& src_image_view) {
const VkPipelineLayout layout = *one_texture_pipeline_layout;
@@ -820,6 +844,61 @@ VkPipeline BlitImageHelper::FindOrEmplaceClearColorPipeline(const BlitImagePipel
return *clear_color_pipelines.back();
}
+VkPipeline BlitImageHelper::FindOrEmplaceClearStencilPipeline(
+ const BlitDepthStencilPipelineKey& key) {
+ const auto it = std::ranges::find(clear_stencil_keys, key);
+ if (it != clear_stencil_keys.end()) {
+ return *clear_stencil_pipelines[std::distance(clear_stencil_keys.begin(), it)];
+ }
+ clear_stencil_keys.push_back(key);
+ const std::array stages = MakeStages(*clear_color_vert, *clear_stencil_frag);
+ const auto stencil = VkStencilOpState{
+ .failOp = VK_STENCIL_OP_KEEP,
+ .passOp = VK_STENCIL_OP_REPLACE,
+ .depthFailOp = VK_STENCIL_OP_KEEP,
+ .compareOp = VK_COMPARE_OP_ALWAYS,
+ .compareMask = key.stencil_compare_mask,
+ .writeMask = key.stencil_mask,
+ .reference = key.stencil_ref,
+ };
+ const VkPipelineDepthStencilStateCreateInfo depth_stencil_ci{
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .depthTestEnable = VK_FALSE,
+ .depthWriteEnable = key.depth_clear,
+ .depthCompareOp = VK_COMPARE_OP_ALWAYS,
+ .depthBoundsTestEnable = VK_FALSE,
+ .stencilTestEnable = VK_TRUE,
+ .front = stencil,
+ .back = stencil,
+ .minDepthBounds = 0.0f,
+ .maxDepthBounds = 0.0f,
+ };
+ clear_stencil_pipelines.push_back(device.GetLogical().CreateGraphicsPipeline({
+ .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .stageCount = static_cast<u32>(stages.size()),
+ .pStages = stages.data(),
+ .pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ .pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ .pTessellationState = nullptr,
+ .pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ .pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ .pMultisampleState = &PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ .pDepthStencilState = &depth_stencil_ci,
+ .pColorBlendState = &PIPELINE_COLOR_BLEND_STATE_GENERIC_CREATE_INFO,
+ .pDynamicState = &PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ .layout = *clear_color_pipeline_layout,
+ .renderPass = key.renderpass,
+ .subpass = 0,
+ .basePipelineHandle = VK_NULL_HANDLE,
+ .basePipelineIndex = 0,
+ }));
+ return *clear_stencil_pipelines.back();
+}
+
void BlitImageHelper::ConvertPipeline(vk::Pipeline& pipeline, VkRenderPass renderpass,
bool is_target_depth) {
if (pipeline) {
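
The FindOrEmplaceClearStencilPipeline helper added above reuses the parallel-vector, find-or-create caching pattern of the existing clear-color path. A minimal standalone sketch of that pattern, with placeholder names rather than yuzu's real types:

    // Minimal sketch of the parallel-vector pipeline cache used by
    // FindOrEmplaceClearStencilPipeline above. All names here are
    // illustrative placeholders, not yuzu's real types.
    #include <algorithm>
    #include <compare>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ClearKey {
        std::uint64_t renderpass; // stand-in for VkRenderPass
        bool depth_clear;
        std::uint8_t stencil_mask;
        std::uint32_t stencil_compare_mask;
        std::uint32_t stencil_ref;

        // A defaulted <=> also declares ==, which std::ranges::find relies on.
        constexpr auto operator<=>(const ClearKey&) const noexcept = default;
    };

    struct Pipeline {}; // stand-in for vk::Pipeline

    class ClearPipelineCache {
    public:
        Pipeline& FindOrEmplace(const ClearKey& key) {
            // Linear search: the number of distinct clear configurations seen
            // at runtime is expected to stay small, so a map would be overkill.
            const auto it = std::ranges::find(keys, key);
            if (it != keys.end()) {
                return pipelines[static_cast<std::size_t>(std::distance(keys.begin(), it))];
            }
            keys.push_back(key);
            pipelines.push_back(Build(key)); // the real code builds a VkPipeline here
            return pipelines.back();
        }

    private:
        static Pipeline Build(const ClearKey&) { return Pipeline{}; }

        std::vector<ClearKey> keys;      // parallel to pipelines
        std::vector<Pipeline> pipelines; // pipelines[i] corresponds to keys[i]
    };

Because both vectors grow in lockstep, the index of a matching key is also the index of its cached pipeline, and the defaulted three-way comparison on the key is what makes the linear lookup possible.
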
diff --git a/src/video_core/renderer_vulkan/blit_image.h b/src/video_core/renderer_vulkan/blit_image.h
index 2976a7d91..dcfe217aa 100644
--- a/src/video_core/renderer_vulkan/blit_image.h
+++ b/src/video_core/renderer_vulkan/blit_image.h
@@ -27,6 +27,16 @@ struct BlitImagePipelineKey {
Tegra::Engines::Fermi2D::Operation operation;
};
+struct BlitDepthStencilPipelineKey {
+ constexpr auto operator<=>(const BlitDepthStencilPipelineKey&) const noexcept = default;
+
+ VkRenderPass renderpass;
+ bool depth_clear;
+ u8 stencil_mask;
+ u32 stencil_compare_mask;
+ u32 stencil_ref;
+};
+
class BlitImageHelper {
public:
explicit BlitImageHelper(const Device& device, Scheduler& scheduler,
@@ -64,6 +74,10 @@ public:
void ClearColor(const Framebuffer* dst_framebuffer, u8 color_mask,
const std::array<f32, 4>& clear_color, const Region2D& dst_region);
+ void ClearDepthStencil(const Framebuffer* dst_framebuffer, bool depth_clear, f32 clear_depth,
+ u8 stencil_mask, u32 stencil_ref, u32 stencil_compare_mask,
+ const Region2D& dst_region);
+
private:
void Convert(VkPipeline pipeline, const Framebuffer* dst_framebuffer,
const ImageView& src_image_view);
@@ -76,6 +90,8 @@ private:
[[nodiscard]] VkPipeline FindOrEmplaceDepthStencilPipeline(const BlitImagePipelineKey& key);
[[nodiscard]] VkPipeline FindOrEmplaceClearColorPipeline(const BlitImagePipelineKey& key);
+ [[nodiscard]] VkPipeline FindOrEmplaceClearStencilPipeline(
+ const BlitDepthStencilPipelineKey& key);
void ConvertPipeline(vk::Pipeline& pipeline, VkRenderPass renderpass, bool is_target_depth);
@@ -108,6 +124,7 @@ private:
vk::ShaderModule blit_depth_stencil_frag;
vk::ShaderModule clear_color_vert;
vk::ShaderModule clear_color_frag;
+ vk::ShaderModule clear_stencil_frag;
vk::ShaderModule convert_depth_to_float_frag;
vk::ShaderModule convert_float_to_depth_frag;
vk::ShaderModule convert_abgr8_to_d24s8_frag;
@@ -122,6 +139,8 @@ private:
std::vector<vk::Pipeline> blit_depth_stencil_pipelines;
std::vector<BlitImagePipelineKey> clear_color_keys;
std::vector<vk::Pipeline> clear_color_pipelines;
+ std::vector<BlitDepthStencilPipelineKey> clear_stencil_keys;
+ std::vector<vk::Pipeline> clear_stencil_pipelines;
vk::Pipeline convert_d32_to_r32_pipeline;
vk::Pipeline convert_r32_to_d32_pipeline;
vk::Pipeline convert_d16_to_r16_pipeline;
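
The stencil fields of BlitDepthStencilPipelineKey feed the VkStencilOpState built in blit_image.cpp above: with VK_COMPARE_OP_ALWAYS and VK_STENCIL_OP_REPLACE, every covered fragment writes the reference value through the write mask. A self-checking illustration of that per-fragment write, using a hypothetical helper rather than emulator code:

    // Illustration of the masked stencil write the pipeline above sets up:
    // VK_COMPARE_OP_ALWAYS + VK_STENCIL_OP_REPLACE stores `reference` into the
    // stencil buffer through `write_mask`. Hypothetical helper, not emulator code.
    #include <cstdint>

    constexpr std::uint8_t MaskedStencilReplace(std::uint8_t current, std::uint8_t reference,
                                                std::uint8_t write_mask) {
        // Bits inside write_mask take the reference value,
        // bits outside it keep their previous contents.
        return static_cast<std::uint8_t>((reference & write_mask) | (current & ~write_mask));
    }

    static_assert(MaskedStencilReplace(0b1010'1010, 0b1111'1111, 0b0000'1111) == 0b1010'1111);
    static_assert(MaskedStencilReplace(0b1010'1010, 0b0000'0000, 0b1111'0000) == 0b0000'1010);
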
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index a8540339d..35bf80ea3 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -126,7 +126,7 @@ struct FormatTuple {
{VK_FORMAT_A1R5G5B5_UNORM_PACK16, Attachable}, // A1R5G5B5_UNORM
{VK_FORMAT_A2B10G10R10_UNORM_PACK32, Attachable | Storage}, // A2B10G10R10_UNORM
{VK_FORMAT_A2B10G10R10_UINT_PACK32, Attachable | Storage}, // A2B10G10R10_UINT
- {VK_FORMAT_A2R10G10B10_UNORM_PACK32, Attachable | Storage}, // A2R10G10B10_UNORM
+ {VK_FORMAT_A2R10G10B10_UNORM_PACK32, Attachable}, // A2R10G10B10_UNORM
{VK_FORMAT_A1R5G5B5_UNORM_PACK16, Attachable}, // A1B5G5R5_UNORM (flipped with swizzle)
{VK_FORMAT_R5G5B5A1_UNORM_PACK16}, // A5B5G5R1_UNORM (specially swizzled)
{VK_FORMAT_R8_UNORM, Attachable | Storage}, // R8_UNORM
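
The tuple change above stops advertising A2R10G10B10_UNORM as a storage format; whether a VkFormat can actually back a storage image is a per-device property rather than a given. A hedged sketch of the plain-Vulkan query such a decision ultimately rests on (standard API calls, not yuzu's Device wrapper):

    // Sketch: querying whether a format supports storage images on this device.
    #include <vulkan/vulkan_core.h>

    bool SupportsStorageImage(VkPhysicalDevice physical_device, VkFormat format) {
        VkFormatProperties properties{};
        vkGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
        return (properties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) != 0;
    }
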
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index aa59889bd..032f694bc 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -428,15 +428,27 @@ void RasterizerVulkan::Clear(u32 layer_count) {
if (aspect_flags == 0) {
return;
}
- scheduler.Record([clear_depth = regs.clear_depth, clear_stencil = regs.clear_stencil,
- clear_rect, aspect_flags](vk::CommandBuffer cmdbuf) {
- VkClearAttachment attachment;
- attachment.aspectMask = aspect_flags;
- attachment.colorAttachment = 0;
- attachment.clearValue.depthStencil.depth = clear_depth;
- attachment.clearValue.depthStencil.stencil = clear_stencil;
- cmdbuf.ClearAttachments(attachment, clear_rect);
- });
+
+ if (use_stencil && regs.stencil_front_mask != 0xFF && regs.stencil_front_mask != 0) {
+ Region2D dst_region = {
+ Offset2D{.x = clear_rect.rect.offset.x, .y = clear_rect.rect.offset.y},
+ Offset2D{.x = clear_rect.rect.offset.x + static_cast<s32>(clear_rect.rect.extent.width),
+ .y = clear_rect.rect.offset.y +
+ static_cast<s32>(clear_rect.rect.extent.height)}};
+ blit_image.ClearDepthStencil(framebuffer, use_depth, regs.clear_depth,
+ static_cast<u8>(regs.stencil_front_mask), regs.clear_stencil,
+ regs.stencil_front_func_mask, dst_region);
+ } else {
+ scheduler.Record([clear_depth = regs.clear_depth, clear_stencil = regs.clear_stencil,
+ clear_rect, aspect_flags](vk::CommandBuffer cmdbuf) {
+ VkClearAttachment attachment;
+ attachment.aspectMask = aspect_flags;
+ attachment.colorAttachment = 0;
+ attachment.clearValue.depthStencil.depth = clear_depth;
+ attachment.clearValue.depthStencil.stencil = clear_stencil;
+ cmdbuf.ClearAttachments(attachment, clear_rect);
+ });
+ }
}
void RasterizerVulkan::DispatchCompute() {
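
The new branch in RasterizerVulkan::Clear above falls back to the draw-based blit_image.ClearDepthStencil whenever the guest's stencil write mask is partial, because vkCmdClearAttachments is not affected by pipeline stencil state and would overwrite every stencil bit. A simplified, self-checking sketch of that decision (illustrative helper, not emulator code):

    // Decision sketch for the branch above: a masked stencil clear cannot go
    // through vkCmdClearAttachments, so it is routed to a draw-based clear.
    #include <cstdint>

    constexpr bool NeedsDrawBasedClear(bool use_stencil, std::uint8_t stencil_write_mask) {
        const bool partial_mask = stencil_write_mask != 0xFF && stencil_write_mask != 0;
        return use_stencil && partial_mask;
    }

    static_assert(NeedsDrawBasedClear(true, 0x0F));   // partial mask -> draw-based clear
    static_assert(!NeedsDrawBasedClear(true, 0xFF));  // full mask -> vkCmdClearAttachments path
    static_assert(!NeedsDrawBasedClear(true, 0x00));  // zero mask also takes the existing path
    static_assert(!NeedsDrawBasedClear(false, 0x0F)); // no stencil aspect requested
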
@@ -830,7 +842,8 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
}
const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
- const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing;
+ const auto post_op = IS_IMAGE_UPLOAD ? VideoCommon::ObtainBufferOperation::DoNothing
+ : VideoCommon::ObtainBufferOperation::MarkAsWritten;
const auto [buffer, offset] =
buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);
@@ -839,8 +852,12 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
const std::span copy_span{&copy, 1};
if constexpr (IS_IMAGE_UPLOAD) {
+ texture_cache.PrepareImage(image_id, true, false);
image->UploadMemory(buffer->Handle(), offset, copy_span);
} else {
+ if (offset % BytesPerBlock(image->info.format)) {
+ return false;
+ }
texture_cache.DownloadImageIntoBuffer(image, buffer->Handle(), offset, copy_span,
buffer_operand.address, buffer_size);
}
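
In the DMA hunk above, image downloads now mark the destination buffer as written (uploads keep DoNothing but call PrepareImage first), and a download whose buffer offset is not aligned to the texel block size is rejected so the generic path handles it instead. A small sketch of that alignment guard, with an illustrative helper name:

    // Sketch of the alignment guard added above: only accelerate the download
    // when the buffer offset is a multiple of the format's bytes per block.
    #include <cstdint>

    constexpr bool CanAccelerateDownload(std::uint64_t buffer_offset,
                                         std::uint32_t bytes_per_block) {
        return bytes_per_block != 0 && buffer_offset % bytes_per_block == 0;
    }

    static_assert(CanAccelerateDownload(256, 16));  // aligned to a 16-byte block -> accelerate
    static_assert(!CanAccelerateDownload(260, 16)); // misaligned -> fall back, return false
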
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index e9ec91265..a40825c9f 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -243,6 +243,9 @@ public:
/// Create channel state.
void CreateChannel(Tegra::Control::ChannelState& channel) final override;
+ /// Prepare an image to be used
+ void PrepareImage(ImageId image_id, bool is_modification, bool invalidate);
+
std::recursive_mutex mutex;
private:
@@ -387,9 +390,6 @@ private:
/// Synchronize image aliases, copying data if needed
void SynchronizeAliases(ImageId image_id);
- /// Prepare an image to be used
- void PrepareImage(ImageId image_id, bool is_modification, bool invalidate);
-
/// Prepare an image view to be used
void PrepareImageView(ImageViewId image_view_id, bool is_modification, bool invalidate);
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index adde96aa5..617417040 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -71,6 +71,11 @@ constexpr std::array R8G8B8_SSCALED{
VK_FORMAT_UNDEFINED,
};
+constexpr std::array VK_FORMAT_R32G32B32_SFLOAT{
+ VK_FORMAT_R32G32B32A32_SFLOAT,
+ VK_FORMAT_UNDEFINED,
+};
+
} // namespace Alternatives
enum class NvidiaArchitecture {
@@ -103,6 +108,8 @@ constexpr const VkFormat* GetFormatAlternatives(VkFormat format) {
return Alternatives::R16G16B16_SSCALED.data();
case VK_FORMAT_R8G8B8_SSCALED:
return Alternatives::R8G8B8_SSCALED.data();
+ case VK_FORMAT_R32G32B32_SFLOAT:
+ return Alternatives::VK_FORMAT_R32G32B32_SFLOAT.data();
default:
return nullptr;
}
@@ -130,6 +137,7 @@ std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(vk::Physica
VK_FORMAT_A2B10G10R10_UINT_PACK32,
VK_FORMAT_A2B10G10R10_UNORM_PACK32,
VK_FORMAT_A2B10G10R10_USCALED_PACK32,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32,
VK_FORMAT_A8B8G8R8_SINT_PACK32,
VK_FORMAT_A8B8G8R8_SNORM_PACK32,
VK_FORMAT_A8B8G8R8_SRGB_PACK32,
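
The new Alternatives entry above maps three-component VK_FORMAT_R32G32B32_SFLOAT to its four-component VK_FORMAT_R32G32B32A32_SFLOAT fallback, terminated by VK_FORMAT_UNDEFINED like the other lists. A simplified sketch of how such a null-terminated alternatives list would be walked (hypothetical helper, not yuzu's exact code):

    // Sketch: walk a VK_FORMAT_UNDEFINED-terminated alternatives list until a
    // supported format is found; otherwise return the requested format as-is.
    #include <vulkan/vulkan_core.h>

    VkFormat PickSupportedFormat(VkFormat wanted, const VkFormat* alternatives,
                                 bool (*is_supported)(VkFormat)) {
        if (is_supported(wanted) || alternatives == nullptr) {
            return wanted;
        }
        for (const VkFormat* alt = alternatives; *alt != VK_FORMAT_UNDEFINED; ++alt) {
            if (is_supported(*alt)) {
                return *alt; // e.g. R32G32B32A32_SFLOAT standing in for R32G32B32_SFLOAT
            }
        }
        return wanted; // nothing better found; the caller handles the unsupported format
    }
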
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 97ae9e49a..a9d035f3d 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -2535,8 +2535,8 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
return;
}
- FileSys::VirtualFile file;
- if (loader->ReadRomFS(file) != Loader::ResultStatus::Success) {
+ FileSys::VirtualFile base_romfs;
+ if (loader->ReadRomFS(base_romfs) != Loader::ResultStatus::Success) {
failed();
return;
}
@@ -2549,6 +2549,14 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
return;
}
+ const auto type = *romfs_title_id == program_id ? FileSys::ContentRecordType::Program
+ : FileSys::ContentRecordType::Data;
+ const auto base_nca = installed.GetEntry(*romfs_title_id, type);
+ if (!base_nca) {
+ failed();
+ return;
+ }
+
const auto dump_dir =
target == DumpRomFSTarget::Normal
? Common::FS::GetYuzuPath(Common::FS::YuzuPath::DumpDir)
@@ -2560,12 +2568,10 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
FileSys::VirtualFile romfs;
if (*romfs_title_id == program_id) {
- const u64 ivfc_offset = loader->ReadRomFSIVFCOffset();
const FileSys::PatchManager pm{program_id, system->GetFileSystemController(), installed};
- romfs =
- pm.PatchRomFS(file, ivfc_offset, FileSys::ContentRecordType::Program, nullptr, false);
+ romfs = pm.PatchRomFS(base_nca.get(), base_romfs, type, nullptr, false);
} else {
- romfs = installed.GetEntry(*romfs_title_id, FileSys::ContentRecordType::Data)->GetRomFS();
+ romfs = installed.GetEntry(*romfs_title_id, type)->GetRomFS();
}
const auto extracted = FileSys::ExtractRomFS(romfs, FileSys::RomFSExtractionType::Full);